From 9be33d7c899509d24f68ca41bb33ca49bf254272 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Fri, 21 Apr 2023 23:57:35 +0000 Subject: [PATCH 01/37] Adding BaseWriteable and ProtobufWriteable Signed-off-by: Vacha Shah --- .../common/io/stream/BaseWriteable.java | 53 +++++++++++++ .../common/io/stream/ProtobufWriteable.java | 76 +++++++++++++++++++ 2 files changed, 129 insertions(+) create mode 100644 server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/ProtobufWriteable.java diff --git a/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java new file mode 100644 index 0000000000000..7d7fa18bb6afa --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.io.stream; + +import java.io.IOException; + +/** + * This interface can be extended to different types of serialization and deserialization mechanisms. + * + * @opensearch.internal + */ +public interface BaseWriteable { + + /** + * Write this into the stream output. + */ + void writeTo(T out) throws IOException; + + /** + * Reference to a method that can write some object to a given type. + */ + @FunctionalInterface + interface Writer { + + /** + * Write {@code V}-type {@code value} to the {@code T}-type stream. + * + * @param out Output to write the {@code value} too + * @param value The value to add + */ + void write(T out, V value) throws IOException; + } + + /** + * Reference to a method that can read some object from a given stream type. + */ + @FunctionalInterface + interface Reader { + + /** + * Read {@code V}-type value from a {@code T}-type stream. + * + * @param in Input to read the value from + */ + V read(S in) throws IOException; + } +} diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufWriteable.java new file mode 100644 index 0000000000000..86f71cf6e5bf2 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufWriteable.java @@ -0,0 +1,76 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.common.io.stream; + +import java.io.IOException; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + +/** + * Implementers can be written to write to output and read from input using Protobuf. +* +* @opensearch.internal +*/ +public interface ProtobufWriteable extends BaseWriteable { + + /** + * Write this into the stream output. + */ + public void writeTo(CodedOutputStream out) throws IOException; + + /** + * Reference to a method that can write some object to a {@link CodedOutputStream}. + * Most classes should implement {@link ProtobufWriteable} and the {@link ProtobufWriteable#writeTo(CodedOutputStream)} method should use + * {@link CodedOutputStream} methods directly or this indirectly: + *
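+     * // "someValue" stands in for an int field on the implementing class (illustrative)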

+     * public void writeTo(CodedOutputStream out) throws IOException {
+     *     out.writeInt32NoTag(someValue);
+     * }
+     * 
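+     * <p>
+     * A {@code Writer} can also be supplied as a lambda or method reference. As a sketch
+     * (the string case here is illustrative, not an API of this interface):
+     * <pre><code>
+     * Writer<String> stringWriter = CodedOutputStream::writeStringNoTag;
+     * </code></pre>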
+ */ + @FunctionalInterface + interface Writer { + + /** + * Write {@code V}-type {@code value} to the {@code out}put stream. + * + * @param out Output to write the {@code value} too + * @param value The value to add + */ + void write(CodedOutputStream out, V value) throws IOException; + + } + + /** + * Reference to a method that can read some object from a stream. By convention this is a constructor that takes + * {@linkplain CodedInputStream} as an argument for most classes and a static method for things like enums. + *

+     * public MyClass(final CodedInputStream in) throws IOException {
+     *     this.someValue = in.readInt32();
+     * }
+     * 
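+     * <p>
+     * A {@code Reader} can likewise be a method reference to such a constructor (a sketch;
+     * {@code MyClass} is the illustrative class above):
+     * <pre><code>
+     * Reader<MyClass> reader = MyClass::new;
+     * </code></pre>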
+ */ + @FunctionalInterface + interface Reader { + + /** + * Read {@code V}-type value from a stream. + * + * @param in Input to read the value from + */ + V read(CodedInputStream in) throws IOException; + + } + +} From 0034b0a5b3e88df911e3a99828ef1b77cfe0a3d2 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Fri, 21 Apr 2023 23:58:21 +0000 Subject: [PATCH 02/37] Adding proto messages for TaskId and TaskResourceStats Signed-off-by: Vacha Shah --- server/src/main/proto/tasks/TaskIdProto.proto | 20 ++++++++++++++++ .../proto/tasks/TaskResourceStatsProto.proto | 23 +++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 server/src/main/proto/tasks/TaskIdProto.proto create mode 100644 server/src/main/proto/tasks/TaskResourceStatsProto.proto diff --git a/server/src/main/proto/tasks/TaskIdProto.proto b/server/src/main/proto/tasks/TaskIdProto.proto new file mode 100644 index 0000000000000..c2d34ca235c28 --- /dev/null +++ b/server/src/main/proto/tasks/TaskIdProto.proto @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +syntax = "proto3"; +package org.opensearch.tasks.proto; + +option java_outer_classname = "TaskIdProto"; + +message TaskId { + string nodeId = 1; + int64 id = 2; +} diff --git a/server/src/main/proto/tasks/TaskResourceStatsProto.proto b/server/src/main/proto/tasks/TaskResourceStatsProto.proto new file mode 100644 index 0000000000000..0e2462ba51a73 --- /dev/null +++ b/server/src/main/proto/tasks/TaskResourceStatsProto.proto @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +syntax = "proto3"; +package org.opensearch.tasks.proto; + +option java_outer_classname = "TaskResourceStatsProto"; + +message TaskResourceStats { + message TaskResourceUsage { + int64 cpuTimeInNanos = 1; + int64 memoryInBytes = 2; + } + map resourceUsage = 1; +} From cdb6806d982835b925181d764378eb8d3c00e1b9 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Fri, 21 Apr 2023 23:58:53 +0000 Subject: [PATCH 03/37] Adding Task related classes with protobuf integration Signed-off-by: Vacha Shah --- .../tasks/ProtobufCancellableTask.java | 91 ++++ .../org/opensearch/tasks/ProtobufTask.java | 446 ++++++++++++++++++ .../tasks/ProtobufTaskAwareRequest.java | 50 ++ .../org/opensearch/tasks/ProtobufTaskId.java | 92 ++++ .../opensearch/tasks/ProtobufTaskInfo.java | 231 +++++++++ .../tasks/ProtobufTaskResourceStats.java | 48 ++ .../opensearch/tasks/ProtobufTaskResult.java | 226 +++++++++ 7 files changed, 1184 insertions(+) create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTask.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java new file mode 100644 index 0000000000000..9e47da0265e86 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java @@ -0,0 +1,91 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +import org.opensearch.common.Nullable; +import org.opensearch.common.unit.TimeValue; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.search.SearchService.NO_TIMEOUT; + +/** + * A task that can be canceled +* +* @opensearch.internal +*/ +public abstract class ProtobufCancellableTask extends ProtobufTask { + + private volatile String reason; + private final AtomicBoolean cancelled = new AtomicBoolean(false); + private final TimeValue cancelAfterTimeInterval; + + public ProtobufCancellableTask(long id, String type, String action, String description, ProtobufTaskId parentTaskId, Map headers) { + this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT); + } + + public ProtobufCancellableTask( + long id, + String type, + String action, + String description, + ProtobufTaskId parentTaskId, + Map headers, + TimeValue cancelAfterTimeInterval + ) { + super(id, type, action, description, parentTaskId, headers); + this.cancelAfterTimeInterval = cancelAfterTimeInterval; + } + + /** + * This method is called by the task manager when this task is cancelled. + */ + public void cancel(String reason) { + assert reason != null; + if (cancelled.compareAndSet(false, true)) { + this.reason = reason; + onCancelled(); + } + } + + /** + * Returns true if this task should be automatically cancelled if the coordinating node that + * requested this task left the cluster. 
+ */ + public boolean cancelOnParentLeaving() { + return true; + } + + /** + * Returns true if this task can potentially have children that need to be cancelled when it parent is cancelled. + */ + public abstract boolean shouldCancelChildrenOnCancellation(); + + public boolean isCancelled() { + return cancelled.get(); + } + + public TimeValue getCancellationTimeout() { + return cancelAfterTimeInterval; + } + + /** + * The reason the task was cancelled or null if it hasn't been cancelled. + */ + @Nullable + public final String getReasonCancelled() { + return reason; + } + + /** + * Called after the task is cancelled so that it can take any actions that it has to take. + */ + protected void onCancelled() {} +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java new file mode 100644 index 0000000000000..dd3a446a0d120 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java @@ -0,0 +1,446 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.NotifyOnceListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.NamedWriteable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Current task information +* +* @opensearch.internal +*/ +public class ProtobufTask { + + private static final Logger logger = LogManager.getLogger(ProtobufTask.class); + + /** + * The request header to mark tasks with specific ids + */ + public static final String X_OPAQUE_ID = "X-Opaque-Id"; + + private static final String TOTAL = "total"; + + private final long id; + + private final String type; + + private final String action; + + private final String description; + + private final ProtobufTaskId parentTask; + + private final Map headers; + + private final Map> resourceStats; + + private final List> resourceTrackingCompletionListeners; + + /** + * Keeps track of the number of active resource tracking threads for this task. It is initialized to 1 to track + * the task's own/self thread. When this value becomes 0, all threads have been marked inactive and the resource + * tracking can be stopped for this task. + */ + private final AtomicInteger numActiveResourceTrackingThreads = new AtomicInteger(1); + + /** + * The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style). + */ + private final long startTime; + + /** + * The task's start time as a relative time ({@link System#nanoTime()} style). 
+ */ + private final long startTimeNanos; + + public ProtobufTask(long id, String type, String action, String description, ProtobufTaskId parentTask, Map headers) { + this( + id, + type, + action, + description, + parentTask, + System.currentTimeMillis(), + System.nanoTime(), + headers, + new ConcurrentHashMap<>(), + new ArrayList<>() + ); + } + + public ProtobufTask( + long id, + String type, + String action, + String description, + ProtobufTaskId parentTask, + long startTime, + long startTimeNanos, + Map headers, + ConcurrentHashMap> resourceStats, + List> resourceTrackingCompletionListeners + ) { + this.id = id; + this.type = type; + this.action = action; + this.description = description; + this.parentTask = parentTask; + this.startTime = startTime; + this.startTimeNanos = startTimeNanos; + this.headers = headers; + this.resourceStats = resourceStats; + this.resourceTrackingCompletionListeners = resourceTrackingCompletionListeners; + } + + /** + * Build a version of the task status you can throw over the wire and back + * to the user. + * + * @param localNodeId + * the id of the node this task is running on + * @param detailed + * should the information include detailed, potentially slow to + * generate data? + */ + public final ProtobufTaskInfo taskInfo(String localNodeId, boolean detailed) { + return taskInfo(localNodeId, detailed, detailed == false); + } + + /** + * Build a version of the task status you can throw over the wire and back + * with the option to include resource stats or not. + * This method is only used during creating TaskResult to avoid storing resource information into the task index. + * + * @param excludeStats should information exclude resource stats. + * By default, detailed flag is used to control including resource information. + * But inorder to avoid storing resource stats into task index as strict mapping is enforced and breaks when adding this field. + * In the future, task-index-mapping.json can be modified to add resource stats. + */ + private ProtobufTaskInfo taskInfo(String localNodeId, boolean detailed, boolean excludeStats) { + String description = null; + ProtobufTask.Status status = null; + ProtobufTaskResourceStats resourceStats = null; + if (detailed) { + description = getDescription(); + status = getStatus(); + } + if (excludeStats == false) { + resourceStats = new ProtobufTaskResourceStats(new HashMap<>() { + { + put(TOTAL, getTotalResourceStats()); + } + }); + } + return taskInfo(localNodeId, description, status, resourceStats); + } + + /** + * Build a {@link ProtobufTaskInfo} for this task without resource stats. + */ + protected final ProtobufTaskInfo taskInfo(String localNodeId, String description, Status status) { + return taskInfo(localNodeId, description, status, null); + } + + /** + * Build a proper {@link ProtobufTaskInfo} for this task. 
+ */ + protected final ProtobufTaskInfo taskInfo(String localNodeId, String description, Status status, TaskResourceStats resourceStats) { + return new ProtobufTaskInfo( + new ProtobufTaskId(localNodeId, getId()), + getType(), + getAction(), + description, + status, + startTime, + System.nanoTime() - startTimeNanos, + this instanceof ProtobufCancellableTask, + this instanceof ProtobufCancellableTask && ((ProtobufCancellableTask) this).isCancelled(), + parentTask, + headers, + resourceStats + ); + } + + /** + * Returns task id + */ + public long getId() { + return id; + } + + /** + * Returns task channel type (netty, transport, direct) + */ + public String getType() { + return type; + } + + /** + * Returns task action + */ + public String getAction() { + return action; + } + + /** + * Generates task description + */ + public String getDescription() { + return description; + } + + /** + * Returns the task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style). + */ + public long getStartTime() { + return startTime; + } + + /** + * Returns the task's start time in nanoseconds ({@link System#nanoTime()} style). + */ + public long getStartTimeNanos() { + return startTimeNanos; + } + + /** + * Returns id of the parent task or NO_PARENT_ID if the task doesn't have any parent tasks + */ + public ProtobufTaskId getParentTaskId() { + return parentTask; + } + + /** + * Build a status for this task or null if this task doesn't have status. + * Since most tasks don't have status this defaults to returning null. While + * this can never perform IO it might be a costly operation, requiring + * collating lists of results, etc. So only use it if you need the value. + */ + public Status getStatus() { + return null; + } + + /** + * Returns thread level resource consumption of the task + */ + public Map> getResourceStats() { + return Collections.unmodifiableMap(resourceStats); + } + + /** + * Returns current total resource usage of the task. + * Currently, this method is only called on demand, during get and listing of tasks. + * In the future, these values can be cached as an optimization. + */ + public TaskResourceUsage getTotalResourceStats() { + return new TaskResourceUsage(getTotalResourceUtilization(ResourceStats.CPU), getTotalResourceUtilization(ResourceStats.MEMORY)); + } + + /** + * Returns total resource consumption for a specific task stat. + */ + public long getTotalResourceUtilization(ResourceStats stats) { + long totalResourceConsumption = 0L; + for (List threadResourceInfosList : resourceStats.values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfosList) { + final ResourceUsageInfo.ResourceStatsInfo statsInfo = threadResourceInfo.getResourceUsageInfo().getStatsInfo().get(stats); + if (threadResourceInfo.getStatsType().isOnlyForAnalysis() == false && statsInfo != null) { + totalResourceConsumption += statsInfo.getTotalValue(); + } + } + } + return totalResourceConsumption; + } + + /** + * Adds thread's starting resource consumption information + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException matching active thread entry was found which is not expected. + */ + public void startThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... 
resourceUsageMetrics) { + final List threadResourceInfoList = resourceStats.computeIfAbsent(threadId, k -> new ArrayList<>()); + // active thread entry should not be present in the list + for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) { + if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { + throw new IllegalStateException( + "unexpected active thread resource entry present [" + threadId + "]:[" + threadResourceInfo + "]" + ); + } + } + threadResourceInfoList.add(new ThreadResourceInfo(threadId, statsType, resourceUsageMetrics)); + incrementResourceTrackingThreads(); + } + + /** + * This method is used to update the resource consumption stats so that the data isn't too stale for long-running task. + * If active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception. + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException if no matching active thread entry was found. + */ + public void updateThreadResourceStats(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + final List threadResourceInfoList = resourceStats.get(threadId); + if (threadResourceInfoList != null) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) { + // the active entry present in the list is updated + if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { + threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics); + return; + } + } + } + throw new IllegalStateException("cannot update if active thread resource entry is not present"); + } + + /** + * Record the thread's final resource consumption values. + * If active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception. + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException if no matching active thread entry was found. + */ + public void stopThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + final List threadResourceInfoList = resourceStats.get(threadId); + if (threadResourceInfoList != null) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) { + if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { + threadResourceInfo.setActive(false); + threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics); + decrementResourceTrackingThreads(); + return; + } + } + } + throw new IllegalStateException("cannot update final values if active thread resource entry is not present"); + } + + /** + * Individual tasks can override this if they want to support task resource tracking. We just need to make sure that + * the ThreadPool on which the task runs on have runnable wrapper similar to + * {@link org.opensearch.common.util.concurrent.OpenSearchExecutors#newResizable} + * + * @return true if resource tracking is supported by the task + */ + public boolean supportsResourceTracking() { + return false; + } + + /** + * Report of the internal status of a task. These can vary wildly from task + * to task because each task is implemented differently but we should try + * to keep each task consistent from version to version where possible. 
+ * That means each implementation of {@linkplain ProtobufTask.Status#toXContent} + * should avoid making backwards incompatible changes to the rendered + * result. But if we change the way a request is implemented it might not + * be possible to preserve backwards compatibility. In that case, we + * can change this on version upgrade but we should be careful + * because some statuses (reindex) have become defacto standardized because + * they are used by systems like Kibana. + */ + public interface Status extends ToXContentObject, NamedWriteable {} + + /** + * Returns stored task header associated with the task + */ + public String getHeader(String header) { + return headers.get(header); + } + + public ProtobufTaskResult result(DiscoveryNode node, Exception error) throws IOException { + return new ProtobufTaskResult(taskInfo(node.getId(), true, true), error); + } + + public ProtobufTaskResult result(DiscoveryNode node, ActionResponse response) throws IOException { + if (response instanceof ToXContent) { + return new ProtobufTaskResult(taskInfo(node.getId(), true, true), (ToXContent) response); + } else { + throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); + } + } + + /** + * Registers a task resource tracking completion listener on this task if resource tracking is still active. + * Returns true on successful subscription, false otherwise. + */ + public boolean addResourceTrackingCompletionListener(NotifyOnceListener listener) { + if (numActiveResourceTrackingThreads.get() > 0) { + resourceTrackingCompletionListeners.add(listener); + return true; + } + + return false; + } + + /** + * Increments the number of active resource tracking threads. + * + * @return the number of active resource tracking threads. + */ + public int incrementResourceTrackingThreads() { + return numActiveResourceTrackingThreads.incrementAndGet(); + } + + /** + * Decrements the number of active resource tracking threads. + * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's + * own thread as complete). When the active thread count becomes zero, the onTaskResourceTrackingCompleted method + * is called exactly once on all registered listeners. + * + * Since a task is unregistered after the message is processed, it implies that the threads responsible to produce + * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). + * This ensures that the number of active threads doesn't drop to zero pre-maturely. + * + * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed + * with the response, any thread usage info captured after the task is unregistered may be irrelevant. + * + * @return the number of active resource tracking threads. 
+ */ + public int decrementResourceTrackingThreads() { + int count = numActiveResourceTrackingThreads.decrementAndGet(); + + if (count == 0) { + List listenerExceptions = new ArrayList<>(); + resourceTrackingCompletionListeners.forEach(listener -> { + try { + listener.onResponse(this); + } catch (Exception e1) { + try { + listener.onFailure(e1); + } catch (Exception e2) { + listenerExceptions.add(e2); + } + } + }); + ExceptionsHelper.maybeThrowRuntimeAndSuppress(listenerExceptions); + } + + return count; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java new file mode 100644 index 0000000000000..beebdea5beebb --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java @@ -0,0 +1,50 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +import java.util.Map; + +/** + * An interface for a request that can be used to register a task manager task +* +* @opensearch.internal +*/ +public interface ProtobufTaskAwareRequest { + /** + * Set a reference to task that caused this task to be run. + */ + default void setParentTask(String parentTaskNode, long parentTaskId) { + setParentTask(new ProtobufTaskId(parentTaskNode, parentTaskId)); + } + + /** + * Set a reference to task that created this request. + */ + void setParentTask(ProtobufTaskId taskId); + + /** + * Get a reference to the task that created this request. Implementers should default to + * {@link ProtobufTaskId#EMPTY_TASK_ID}, meaning "there is no parent". + */ + ProtobufTaskId getParentTask(); + + /** + * Returns the task object that should be used to keep track of the processing of the request. + */ + default ProtobufTask createTask(long id, String type, String action, ProtobufTaskId parentTaskId, Map headers) { + return new ProtobufTask(id, type, action, getDescription(), parentTaskId, headers); + } + + /** + * Returns optional description of the request to be displayed by the task manager + */ + default String getDescription() { + return ""; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java new file mode 100644 index 0000000000000..81ba9e2cabd5d --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java @@ -0,0 +1,92 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.tasks; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.tasks.proto.TaskIdProto; + +import java.io.IOException; + +/** + * Task id that consists of node id and id of the task on the node +* +* @opensearch.internal +*/ +public final class ProtobufTaskId implements ProtobufWriteable { + + public static final ProtobufTaskId EMPTY_TASK_ID = new ProtobufTaskId(); + + private final TaskIdProto.TaskId taskId; + + public ProtobufTaskId(String nodeId, long id) { + this.taskId = TaskIdProto.TaskId.newBuilder().setNodeId(nodeId).setId(id).build(); + } + + /** + * Builds {@link #EMPTY_TASK_ID}. + */ + private ProtobufTaskId() { + this.taskId = TaskIdProto.TaskId.newBuilder().setNodeId("").setId(-1L).build(); + } + + public ProtobufTaskId(String taskId) { + if (Strings.hasLength(taskId) && "unset".equals(taskId) == false) { + String[] s = Strings.split(taskId, ":"); + if (s == null || s.length != 2) { + throw new IllegalArgumentException("malformed task id " + taskId); + } + String nodeId = s[0]; + try { + long id = Long.parseLong(s[1]); + this.taskId = TaskIdProto.TaskId.newBuilder().setNodeId(nodeId).setId(id).build(); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("malformed task id " + taskId, ex); + } + } else { + this.taskId = EMPTY_TASK_ID.taskId; + } + } + + /** + * Read a {@linkplain ProtobufTaskId} from a stream. {@linkplain ProtobufTaskId} has this rather than the usual constructor that takes a + * {@linkplain CodedInputStream} so we can return the {@link #EMPTY_TASK_ID} without allocating. + */ + public static ProtobufTaskId readFromStream(CodedInputStream in) throws IOException { + String nodeId = in.readString(); + if (nodeId.isEmpty()) { + /* + * The only TaskId allowed to have the empty string as its nodeId is the EMPTY_TASK_ID and there is only ever one of it and it + * never writes its taskId to save bytes on the wire because it is by far the most common TaskId. + */ + return EMPTY_TASK_ID; + } + return new ProtobufTaskId(nodeId, in.readInt64()); + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream out) throws IOException { + this.taskId.writeTo(out); + } + + public String getNodeId() { + return this.taskId.getNodeId(); + } + + public long getId() { + return this.taskId.getId(); + } + + public boolean isSet() { + return this.taskId.getId() != -1L; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java new file mode 100644 index 0000000000000..e0acdb7f54ec1 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java @@ -0,0 +1,231 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.tasks; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.Version; +import org.opensearch.common.Strings; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Information about a currently running task. +*

+* Tasks are used for communication with transport actions. As a result, they can contain callback +* references as well as mutable state. That makes it impractical to send tasks over transport channels +* and use in APIs. Instead, immutable and writeable ProtobufTaskInfo objects are used to represent +* snapshot information about currently running tasks. +* +* @opensearch.internal +*/ +public final class ProtobufTaskInfo implements ProtobufWriteable { + private final ProtobufTaskId taskId; + + private final String type; + + private final String action; + + private final String description; + + private final long startTime; + + private final long runningTimeNanos; + + private final ProtobufTask.Status status; + + private final boolean cancellable; + + private final boolean cancelled; + + private final ProtobufTaskId parentTaskId; + + private final Map headers; + + private final ProtobufTaskResourceStats resourceStats; + + private ProtobufStreamInput protobufStreamInput; + + private ProtobufStreamOutput protobufStreamOutput; + + public ProtobufTaskInfo( + ProtobufTaskId taskId, + String type, + String action, + String description, + ProtobufTask.Status status, + long startTime, + long runningTimeNanos, + boolean cancellable, + boolean cancelled, + ProtobufTaskId parentTaskId, + Map headers, + ProtobufTaskResourceStats resourceStats + ) { + if (cancellable == false && cancelled == true) { + throw new IllegalArgumentException("task cannot be cancelled"); + } + this.taskId = taskId; + this.type = type; + this.action = action; + this.description = description; + this.status = status; + this.startTime = startTime; + this.runningTimeNanos = runningTimeNanos; + this.cancellable = cancellable; + this.cancelled = cancelled; + this.parentTaskId = parentTaskId; + this.headers = headers; + this.resourceStats = resourceStats; + } + + /** + * Read from a stream. 
+ */ + @SuppressWarnings("unchecked") + public ProtobufTaskInfo(CodedInputStream in) throws IOException { + protobufStreamInput = new ProtobufStreamInput(); + taskId = ProtobufTaskId.readFromStream(in); + type = in.readString(); + action = in.readString(); + description = protobufStreamInput.readOptionalString(in); + //TODO: fix this + status = null; + startTime = in.readInt64(); + runningTimeNanos = in.readInt64(); + cancellable = in.readBool(); + if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_0_0)) { + cancelled = in.readBool(); + } else { + cancelled = false; + } + if (cancellable == false && cancelled == true) { + throw new IllegalArgumentException("task cannot be cancelled"); + } + parentTaskId = ProtobufTaskId.readFromStream(in); + headers = protobufStreamInput.readMap(CodedInputStream::readString, CodedInputStream::readString, in); + if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_1_0)) { + resourceStats = protobufStreamInput.readOptionalWriteable(ProtobufTaskResourceStats::new, in); + } else { + resourceStats = null; + } + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + protobufStreamOutput = new ProtobufStreamOutput(); + taskId.writeTo(out); + out.writeString(1, type); + out.writeString(2, action); + out.writeString(3, description); + //TODO: fix this + // out.writeOptionalNamedWriteable(status); + out.writeInt64(4, startTime); + out.writeInt64(5, runningTimeNanos); + out.writeBool(6, cancellable); + if (protobufStreamOutput.getVersion().onOrAfter(Version.V_2_0_0)) { + out.writeBool(7, cancelled); + } + parentTaskId.writeTo(out); + protobufStreamOutput.writeMap(headers, CodedOutputStream::writeString, CodedOutputStream::writeString, out); + if (protobufStreamOutput.getVersion().onOrAfter(Version.V_2_1_0)) { + out.writeOptionalWriteable(resourceStats, out); + } + } + + public ProtobufTaskId getTaskId() { + return taskId; + } + + public long getId() { + return taskId.getId(); + } + + public String getType() { + return type; + } + + public String getAction() { + return action; + } + + public String getDescription() { + return description; + } + + /** + * The status of the running task. Only available if TaskInfos were build + * with the detailed flag. 
+ */ + public ProtobufTask.Status getStatus() { + return status; + } + + /** + * Returns the task start time + */ + public long getStartTime() { + return startTime; + } + + /** + * Returns the task running time + */ + public long getRunningTimeNanos() { + return runningTimeNanos; + } + + /** + * Returns true if the task supports cancellation + */ + public boolean isCancellable() { + return cancellable; + } + + /** + * Returns true if the task has been cancelled + */ + public boolean isCancelled() { + return cancelled; + } + + /** + * Returns the parent task id + */ + public ProtobufTaskId getParentTaskId() { + return parentTaskId; + } + + /** + * Returns the task headers + */ + public Map getHeaders() { + return headers; + } + + /** + * Returns the task resource information + */ + public ProtobufTaskResourceStats getResourceStats() { + return resourceStats; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java new file mode 100644 index 0000000000000..30de39551c335 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java @@ -0,0 +1,48 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.tasks.proto.TaskResourceStatsProto; + +import com.google.protobuf.CodedOutputStream; +import com.google.protobuf.CodedInputStream; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Resource information about a currently running task. +*

+* Writeable TaskResourceStats objects are used to represent resource +* snapshot information about currently running task. +* +* @opensearch.internal +*/ +public class ProtobufTaskResourceStats implements ProtobufWriteable { + private final TaskResourceStatsProto.TaskResourceStats taskResourceStats; + + /** + * Read from a stream. + */ + public ProtobufTaskResourceStats(CodedInputStream in) throws IOException { + this.taskResourceStats = TaskResourceStatsProto.TaskResourceStats.parseFrom(in.readByteArray()); + } + + public Map getResourceUsageInfo() { + return this.taskResourceStats.getResourceUsageMap(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + this.taskResourceStats.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java new file mode 100644 index 0000000000000..c3314ae40455f --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java @@ -0,0 +1,226 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +import org.opensearch.OpenSearchException; +import org.opensearch.client.Requests; +import org.opensearch.common.Nullable; +import org.opensearch.core.ParseField; +import org.opensearch.common.Strings; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.core.xcontent.InstantiatingObjectParser; +import org.opensearch.common.xcontent.ObjectParserHelper; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.emptyMap; +import static java.util.Objects.requireNonNull; +import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.opensearch.common.xcontent.XContentHelper.convertToMap; + +/** + * Information about a running task or a task that stored its result. Running tasks just have a {@link #getTask()} while +* tasks with stored result will have either a {@link #getError()} or {@link #getResponse()}. +* +* @opensearch.internal +*/ +public final class ProtobufTaskResult implements ProtobufWriteable, ToXContentObject { + private final boolean completed; + private final ProtobufTaskInfo task; + @Nullable + private final BytesReference error; + @Nullable + private final BytesReference response; + + /** + * Construct a {@linkplain TaskResult} for a task for which we don't have a result or error. That usually means that the task + * is incomplete, but it could also mean that we waited for the task to complete but it didn't save any error information. 
+ */ + public ProtobufTaskResult(boolean completed, ProtobufTaskInfo task) { + this(completed, task, null, null); + } + + /** + * Construct a {@linkplain TaskResult} for a task that completed with an error. + */ + public ProtobufTaskResult(ProtobufTaskInfo task, Exception error) throws IOException { + this(true, task, toXContent(error), null); + } + + /** + * Construct a {@linkplain ProtobufTaskResult} for a task that completed successfully. + */ + public ProtobufTaskResult(ProtobufTaskInfo task, ToXContent response) throws IOException { + this(true, task, null, XContentHelper.toXContent(response, Requests.INDEX_CONTENT_TYPE, true)); + } + + public ProtobufTaskResult(boolean completed, ProtobufTaskInfo task, @Nullable BytesReference error, @Nullable BytesReference result) { + this.completed = completed; + this.task = requireNonNull(task, "task is required"); + this.error = error; + this.response = result; + } + + /** + * Read from a stream. + */ + public ProtobufTaskResult(com.google.protobuf.CodedInputStream in) throws IOException { + completed = in.readBool(); + task = new ProtobufTaskInfo(in); + error = in.readOptionalBytesReference(); + response = in.readOptionalBytesReference(); + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream out) throws IOException { + out.writeBool(0, completed); + task.writeTo(out); + out.writeByteArray(1, error); + out.writeOptionalBytesReference(response); + } + + /** + * Get the task that this wraps. + */ + public ProtobufTaskInfo getTask() { + return task; + } + + /** + * Get the error that finished this task. Will return null if the task didn't finish with an error, it hasn't yet finished, or didn't + * store its result. + */ + public BytesReference getError() { + return error; + } + + /** + * Convert {@link #getError()} from XContent to a Map for easy processing. Will return an empty map if the task didn't finish with an + * error, hasn't yet finished, or didn't store its result. + */ + public Map getErrorAsMap() { + if (error == null) { + return emptyMap(); + } + return convertToMap(error, false).v2(); + } + + /** + * Get the response that this task finished with. Will return null if the task was finished by an error, it hasn't yet finished, or + * didn't store its result. + */ + public BytesReference getResponse() { + return response; + } + + /** + * Convert {@link #getResponse()} from XContent to a Map for easy processing. Will return an empty map if the task was finished with an + * error, hasn't yet finished, or didn't store its result. 
+ */ + public Map getResponseAsMap() { + if (response == null) { + return emptyMap(); + } + return convertToMap(response, false).v2(); + } + + public boolean isCompleted() { + return completed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); + return builder.endObject(); + } + + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("completed", completed); + builder.startObject("task"); + task.toXContent(builder, params); + builder.endObject(); + if (error != null) { + XContentHelper.writeRawField("error", error, builder, params); + } + if (response != null) { + XContentHelper.writeRawField("response", response, builder, params); + } + return builder; + } + + public static final InstantiatingObjectParser PARSER; + + static { + InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( + "stored_task_result", + true, + ProtobufTaskResult.class + ); + parser.declareBoolean(constructorArg(), new ParseField("completed")); + parser.declareObject(constructorArg(), ProtobufTaskInfo.PARSER, new ParseField("task")); + ObjectParserHelper parserHelper = new ObjectParserHelper<>(); + parserHelper.declareRawObject(parser, optionalConstructorArg(), new ParseField("error")); + parserHelper.declareRawObject(parser, optionalConstructorArg(), new ParseField("response")); + PARSER = parser.build(); + } + + @Override + public String toString() { + return Strings.toString(XContentType.JSON, this); + } + + // Implements equals and hashcode for testing + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != ProtobufTaskResult.class) { + return false; + } + ProtobufTaskResult other = (ProtobufTaskResult) obj; + /* + * Equality of error and result is done by converting them to a map first. Not efficient but ignores field order and spacing + * differences so perfect for testing. + */ + return Objects.equals(completed, other.completed) + && Objects.equals(task, other.task) + && Objects.equals(getErrorAsMap(), other.getErrorAsMap()) + && Objects.equals(getResponseAsMap(), other.getResponseAsMap()); + } + + @Override + public int hashCode() { + /* + * Hashing of error and result is done by converting them to a map first. Not efficient but ignores field order and spacing + * differences so perfect for testing. 
+ */ + return Objects.hash(completed, task, getErrorAsMap(), getResponseAsMap()); + } + + private static BytesReference toXContent(Exception error) throws IOException { + try (XContentBuilder builder = XContentFactory.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { + builder.startObject(); + OpenSearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, error); + builder.endObject(); + return BytesReference.bytes(builder); + } + } +} From c8c2f538a27178ac830eed7e9731dbcca5d8506c Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Fri, 21 Apr 2023 23:59:39 +0000 Subject: [PATCH 04/37] Adding ProtobufStreamInput and ProtobufStreamOutput for additional stream related functionality Signed-off-by: Vacha Shah --- .../common/io/stream/ProtobufStreamInput.java | 141 ++++++++++++++++++ .../io/stream/ProtobufStreamOutput.java | 63 ++++++++ 2 files changed, 204 insertions(+) create mode 100644 server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java new file mode 100644 index 0000000000000..da867e7e487e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java @@ -0,0 +1,141 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.common.io.stream; + +import com.google.protobuf.CodedInputStream; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import org.apache.lucene.util.ArrayUtil; +import org.opensearch.Version; +import org.opensearch.common.Nullable; + +public class ProtobufStreamInput { + + private Version version = Version.CURRENT; + + /** + * The version of the node on the other side of this stream. + */ + public Version getVersion() { + return this.version; + } + + /** + * Set the version of the node on the other side of this stream. + */ + public void setVersion(Version version) { + this.version = version; + } + + @Nullable + public String readOptionalString(CodedInputStream in) throws IOException { + if (readBoolean(in)) { + return in.readString(); + } + return null; + } + + /** + * If the returned map contains any entries it will be mutable. If it is empty it might be immutable. + */ + public Map readMap(ProtobufWriteable.Reader keyReader, ProtobufWriteable.Reader valueReader, CodedInputStream in) throws IOException { + int size = readArraySize(in); + if (size == 0) { + return Collections.emptyMap(); + } + Map map = new HashMap<>(size); + for (int i = 0; i < size; i++) { + K key = keyReader.read(in); + V value = valueReader.read(in); + map.put(key, value); + } + return map; + } + + @Nullable + public T readOptionalWriteable(ProtobufWriteable.Reader reader, CodedInputStream in) throws IOException { + if (readBoolean(in)) { + T t = reader.read(in); + if (t == null) { + throw new IOException( + "Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream." 
+ ); + } + return t; + } else { + return null; + } + } + + private int readArraySize(CodedInputStream in) throws IOException { + final int arraySize = readVInt(in); + if (arraySize > ArrayUtil.MAX_ARRAY_LENGTH) { + throw new IllegalStateException("array length must be <= to " + ArrayUtil.MAX_ARRAY_LENGTH + " but was: " + arraySize); + } + if (arraySize < 0) { + throw new NegativeArraySizeException("array size must be positive but was: " + arraySize); + } + // lets do a sanity check that if we are reading an array size that is bigger that the remaining bytes we can safely + // throw an exception instead of allocating the array based on the size. A simple corrutpted byte can make a node go OOM + // if the size is large and for perf reasons we allocate arrays ahead of time + // ensureCanReadBytes(arraySize); + return arraySize; + } + + public int readVInt(CodedInputStream in) throws IOException { + byte b = in.readRawByte(); + int i = b & 0x7F; + if ((b & 0x80) == 0) { + return i; + } + b = in.readRawByte(); + i |= (b & 0x7F) << 7; + if ((b & 0x80) == 0) { + return i; + } + b = in.readRawByte(); + i |= (b & 0x7F) << 14; + if ((b & 0x80) == 0) { + return i; + } + b = in.readRawByte(); + i |= (b & 0x7F) << 21; + if ((b & 0x80) == 0) { + return i; + } + b = in.readRawByte(); + if ((b & 0x80) != 0) { + throw new IOException("Invalid vInt ((" + Integer.toHexString(b) + " & 0x7f) << 28) | " + Integer.toHexString(i)); + } + return i | ((b & 0x7F) << 28); + } + + /** + * Reads a boolean. + */ + public final boolean readBoolean(CodedInputStream in) throws IOException { + return readBoolean(in.readRawByte()); + } + + private boolean readBoolean(final byte value) { + if (value == 0) { + return false; + } else if (value == 1) { + return true; + } else { + final String message = String.format(Locale.ROOT, "unexpected byte [0x%02x]", value); + throw new IllegalStateException(message); + } + } +} \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java new file mode 100644 index 0000000000000..7b69249a661bd --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java @@ -0,0 +1,63 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.common.io.stream; + +import com.google.protobuf.CodedOutputStream; + +import java.io.IOException; +import java.util.Map; + +import org.opensearch.Version; +import org.opensearch.common.Nullable; + +public class ProtobufStreamOutput { + + private Version version = Version.CURRENT; + + /** + * The version of the node on the other side of this stream. + */ + public Version getVersion() { + return this.version; + } + + /** + * Set the version of the node on the other side of this stream. + */ + public void setVersion(Version version) { + this.version = version; + } + + /** + * Write a {@link Map} of {@code K}-type keys to {@code V}-type. + *


+     * Map<String, String> map = ...;
+     * protobufStreamOutput.writeMap(map, CodedOutputStream::writeStringNoTag, CodedOutputStream::writeStringNoTag, out);
+     * 
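+     * <p>
+     * Note that no entry count is written here, while {@code ProtobufStreamInput#readMap}
+     * reads a leading size; callers are assumed to handle the map length themselves. The
+     * {@code writeStringNoTag} writers above are illustrative; any matching writer works.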
+ * + * @param keyWriter The key writer + * @param valueWriter The value writer + */ + public final void writeMap(final Map map, final ProtobufWriteable.Writer keyWriter, final ProtobufWriteable.Writer valueWriter, CodedOutputStream out) throws IOException { + for (final Map.Entry entry : map.entrySet()) { + keyWriter.write(out, entry.getKey()); + valueWriter.write(out, entry.getValue()); + } + } + + public void writeOptionalWriteable(@Nullable ProtobufWriteable writeable, CodedOutputStream out) throws IOException { + if (writeable != null) { + out.writeBool(1, true); + writeable.writeTo(out); + } else { + out.writeBool(1, false); + } + } + +} \ No newline at end of file From a2be2fa46ec67478031eba797ea738cbcfee5d60 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Sat, 22 Apr 2023 00:00:23 +0000 Subject: [PATCH 05/37] Adding TransportMessage and TransportRequest classes with protobuf integration Signed-off-by: Vacha Shah --- .../transport/ProtobufTransportMessage.java | 43 ++++++++++++++ .../transport/ProtobufTransportRequest.java | 59 +++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java new file mode 100644 index 0000000000000..0bca490774ec4 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.transport.TransportAddress; + +/** + * Message over the transport interface with Protobuf serialization. +* +* @opensearch.internal +*/ +public abstract class ProtobufTransportMessage implements ProtobufWriteable { + + private TransportAddress remoteAddress; + + public void remoteAddress(TransportAddress remoteAddress) { + this.remoteAddress = remoteAddress; + } + + public TransportAddress remoteAddress() { + return remoteAddress; + } + + /** + * Constructs a new empty transport message + */ + public ProtobufTransportMessage() {} + + /** + * Constructs a new transport message with the data from the {@link CodedInputStream}. This is + * currently a no-op + */ + public ProtobufTransportMessage(com.google.protobuf.CodedInputStream in) {} +} + \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java new file mode 100644 index 0000000000000..ec78e3cdb2bf1 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java @@ -0,0 +1,59 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.tasks.ProtobufTaskId; +import org.opensearch.tasks.ProtobufTaskAwareRequest; +import org.opensearch.tasks.TaskId; + +import java.io.IOException; + +/** + * A transport request with Protobuf serialization. +* +* @opensearch.internal +*/ +public abstract class ProtobufTransportRequest extends ProtobufTransportMessage implements ProtobufTaskAwareRequest { + + /** + * Parent of this request. Defaults to {@link TaskId#EMPTY_TASK_ID}, meaning "no parent". + */ + private ProtobufTaskId parentTaskId = ProtobufTaskId.EMPTY_TASK_ID; + + public ProtobufTransportRequest() {} + + public ProtobufTransportRequest(com.google.protobuf.CodedInputStream in) throws IOException { + parentTaskId = ProtobufTaskId.readFromStream(in); + } + + /** + * Set a reference to task that created this request. + */ + @Override + public void setParentTask(ProtobufTaskId taskId) { + this.parentTaskId = taskId; + } + + /** + * Get a reference to the task that created this request. Defaults to {@link TaskId#EMPTY_TASK_ID}, meaning "there is no parent". + */ + @Override + public ProtobufTaskId getParentTask() { + return parentTaskId; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream out) throws IOException { + parentTaskId.writeTo(out); + } +} From c0bbf92ba1380d35871b9e30a389ac2934491532 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Tue, 25 Apr 2023 00:16:31 +0000 Subject: [PATCH 06/37] Fixing build and precommit Signed-off-by: Vacha Shah --- server/build.gradle | 1 + .../common/io/stream/BaseWriteable.java | 4 +- .../common/io/stream/ProtobufStreamInput.java | 40 +++++++- .../io/stream/ProtobufStreamOutput.java | 16 ++- .../tasks/ProtobufCancellableTask.java | 14 ++- .../org/opensearch/tasks/ProtobufTask.java | 20 +++- .../tasks/ProtobufTaskAwareRequest.java | 3 + .../org/opensearch/tasks/ProtobufTaskId.java | 6 +- .../opensearch/tasks/ProtobufTaskInfo.java | 72 ++++++++++---- .../tasks/ProtobufTaskResourceStats.java | 28 +++++- .../opensearch/tasks/ProtobufTaskResult.java | 99 +++++-------------- .../transport/ProtobufTransportMessage.java | 18 ++-- .../transport/ProtobufTransportRequest.java | 9 +- 13 files changed, 204 insertions(+), 126 deletions(-) diff --git a/server/build.gradle b/server/build.gradle index 3fde1b745c546..efe2eae553364 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -394,6 +394,7 @@ tasks.named("licenseHeaders").configure { excludes << 'org/opensearch/client/documentation/placeholder.txt' // Ignore for protobuf generated code excludes << 'org/opensearch/extensions/proto/*' + excludes << 'org/opensearch/tasks/proto/*' } tasks.test { diff --git a/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java index 7d7fa18bb6afa..7364e1c24e47f 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java +++ b/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java @@ -21,7 +21,7 @@ public interface BaseWriteable { * Write this into the stream output. */ void writeTo(T out) throws IOException; - + /** * Reference to a method that can write some object to a given type. 
*/ @@ -42,7 +42,7 @@ interface Writer { */ @FunctionalInterface interface Reader { - + /** * Read {@code V}-type value from a {@code T}-type stream. * diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java index da867e7e487e1..843d6755e42d7 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java @@ -17,9 +17,15 @@ import java.util.Map; import org.apache.lucene.util.ArrayUtil; + +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.BytesReference; import org.opensearch.Version; import org.opensearch.common.Nullable; +/** + * A class for additional methods to read from a {@link CodedInputStream}. + */ public class ProtobufStreamInput { private Version version = Version.CURRENT; @@ -49,7 +55,8 @@ public String readOptionalString(CodedInputStream in) throws IOException { /** * If the returned map contains any entries it will be mutable. If it is empty it might be immutable. */ - public Map readMap(ProtobufWriteable.Reader keyReader, ProtobufWriteable.Reader valueReader, CodedInputStream in) throws IOException { + public Map readMap(ProtobufWriteable.Reader keyReader, ProtobufWriteable.Reader valueReader, CodedInputStream in) + throws IOException { int size = readArraySize(in); if (size == 0) { return Collections.emptyMap(); @@ -64,7 +71,8 @@ public Map readMap(ProtobufWriteable.Reader keyReader, ProtobufW } @Nullable - public T readOptionalWriteable(ProtobufWriteable.Reader reader, CodedInputStream in) throws IOException { + public T readOptionalWriteable(ProtobufWriteable.Reader reader, CodedInputStream in) + throws IOException { if (readBoolean(in)) { T t = reader.read(in); if (t == null) { @@ -128,6 +136,32 @@ public final boolean readBoolean(CodedInputStream in) throws IOException { return readBoolean(in.readRawByte()); } + /** + * Reads an optional bytes reference from this stream. It might hold an actual reference to the underlying bytes of the stream. Use this + * only if you must differentiate null from empty. + */ + @Nullable + public BytesReference readOptionalBytesReference(CodedInputStream in) throws IOException { + int length = readVInt(in) - 1; + if (length < 0) { + return null; + } + return readBytesReference(length, in); + } + + /** + * Reads a bytes reference from this stream, might hold an actual reference to the underlying + * bytes of the stream. 
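+     * A rough usage sketch (hypothetical; assumes the peer wrote a vInt length first):
+     * <pre>
+     * int length = readVInt(in);
+     * BytesReference bytes = readBytesReference(length, in);
+     * </pre>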
+ */ + public BytesReference readBytesReference(int length, CodedInputStream in) throws IOException { + if (length == 0) { + return BytesArray.EMPTY; + } + byte[] bytes = new byte[length]; + bytes = in.readByteArray(); + return new BytesArray(bytes, 0, length); + } + private boolean readBoolean(final byte value) { if (value == 0) { return false; @@ -138,4 +172,4 @@ private boolean readBoolean(final byte value) { throw new IllegalStateException(message); } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java index 7b69249a661bd..b6082159d024a 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java @@ -16,6 +16,9 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; +/** + * A class for additional methods to write to a {@link CodedOutputStream}. + */ public class ProtobufStreamOutput { private Version version = Version.CURRENT; @@ -44,7 +47,12 @@ public void setVersion(Version version) { * @param keyWriter The key writer * @param valueWriter The value writer */ - public final void writeMap(final Map map, final ProtobufWriteable.Writer keyWriter, final ProtobufWriteable.Writer valueWriter, CodedOutputStream out) throws IOException { + public final void writeMap( + final Map map, + final ProtobufWriteable.Writer keyWriter, + final ProtobufWriteable.Writer valueWriter, + CodedOutputStream out + ) throws IOException { for (final Map.Entry entry : map.entrySet()) { keyWriter.write(out, entry.getKey()); valueWriter.write(out, entry.getValue()); @@ -53,11 +61,11 @@ public final void writeMap(final Map map, final ProtobufWriteable.W public void writeOptionalWriteable(@Nullable ProtobufWriteable writeable, CodedOutputStream out) throws IOException { if (writeable != null) { - out.writeBool(1, true); + out.writeBoolNoTag(true); writeable.writeTo(out); } else { - out.writeBool(1, false); + out.writeBoolNoTag(false); } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java index 9e47da0265e86..fad1565390cd3 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java @@ -4,6 +4,9 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
*/ package org.opensearch.tasks; @@ -17,7 +20,7 @@ import static org.opensearch.search.SearchService.NO_TIMEOUT; /** - * A task that can be canceled + * A protobuf task that can be canceled * * @opensearch.internal */ @@ -27,7 +30,14 @@ public abstract class ProtobufCancellableTask extends ProtobufTask { private final AtomicBoolean cancelled = new AtomicBoolean(false); private final TimeValue cancelAfterTimeInterval; - public ProtobufCancellableTask(long id, String type, String action, String description, ProtobufTaskId parentTaskId, Map headers) { + public ProtobufCancellableTask( + long id, + String type, + String action, + String description, + ProtobufTaskId parentTaskId, + Map headers + ) { this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT); } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java index dd3a446a0d120..7c7ee83adb679 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java @@ -4,6 +4,9 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. */ package org.opensearch.tasks; @@ -17,6 +20,7 @@ import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.tasks.proto.TaskResourceStatsProto; import java.io.IOException; import java.util.ArrayList; @@ -28,7 +32,7 @@ import java.util.concurrent.atomic.AtomicInteger; /** - * Current task information + * Current protobuf task information * * @opensearch.internal */ @@ -167,7 +171,12 @@ protected final ProtobufTaskInfo taskInfo(String localNodeId, String description /** * Build a proper {@link ProtobufTaskInfo} for this task. */ - protected final ProtobufTaskInfo taskInfo(String localNodeId, String description, Status status, TaskResourceStats resourceStats) { + protected final ProtobufTaskInfo taskInfo( + String localNodeId, + String description, + Status status, + ProtobufTaskResourceStats resourceStats + ) { return new ProtobufTaskInfo( new ProtobufTaskId(localNodeId, getId()), getType(), @@ -255,8 +264,11 @@ public Map> getResourceStats() { * Currently, this method is only called on demand, during get and listing of tasks. * In the future, these values can be cached as an optimization. 
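     * A rough consumption sketch (hypothetical; assumes the standard generated protobuf getters that
     * mirror the {@code setCpuTimeInNanos}/{@code setMemoryInBytes} builder calls below):
     * <pre>
     * TaskResourceStatsProto.TaskResourceStats.TaskResourceUsage usage = task.getTotalResourceStats();
     * long cpuNanos = usage.getCpuTimeInNanos();
     * long memoryBytes = usage.getMemoryInBytes();
     * </pre>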
*/ - public TaskResourceUsage getTotalResourceStats() { - return new TaskResourceUsage(getTotalResourceUtilization(ResourceStats.CPU), getTotalResourceUtilization(ResourceStats.MEMORY)); + public TaskResourceStatsProto.TaskResourceStats.TaskResourceUsage getTotalResourceStats() { + return TaskResourceStatsProto.TaskResourceStats.TaskResourceUsage.newBuilder() + .setCpuTimeInNanos(getTotalResourceUtilization(ResourceStats.CPU)) + .setMemoryInBytes(getTotalResourceUtilization(ResourceStats.MEMORY)) + .build(); } /** diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java index beebdea5beebb..3ff06c87fb880 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java @@ -4,6 +4,9 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. */ package org.opensearch.tasks; diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java index 81ba9e2cabd5d..5e0270c470f0b 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java @@ -4,13 +4,15 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. */ package org.opensearch.tasks; import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; -import org.opensearch.OpenSearchParseException; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.tasks.proto.TaskIdProto; @@ -74,7 +76,7 @@ public static ProtobufTaskId readFromStream(CodedInputStream in) throws IOExcept } @Override - public void writeTo(com.google.protobuf.CodedOutputStream out) throws IOException { + public void writeTo(CodedOutputStream out) throws IOException { this.taskId.writeTo(out); } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java index e0acdb7f54ec1..71939b4be2bfb 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java @@ -4,6 +4,9 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
*/ package org.opensearch.tasks; @@ -11,23 +14,17 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; import org.opensearch.Version; -import org.opensearch.common.Strings; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Collections; import java.util.Map; -import java.util.Objects; import java.util.concurrent.TimeUnit; -import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; -import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; - /** * Information about a currently running task. *

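For orientation, a minimal round-trip sketch (hypothetical; it simply pairs the CodedInputStream constructor with the writeTo(CodedOutputStream) override updated in the next hunk):

    byte[] buf = new byte[4096];                                  // scratch buffer, sized arbitrarily
    CodedOutputStream out = CodedOutputStream.newInstance(buf);
    taskInfo.writeTo(out);                                        // serialize
    out.flush();
    ProtobufTaskInfo copy = new ProtobufTaskInfo(CodedInputStream.newInstance(buf));   // deserialize
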
@@ -38,7 +35,7 @@ * * @opensearch.internal */ -public final class ProtobufTaskInfo implements ProtobufWriteable { +public final class ProtobufTaskInfo implements ProtobufWriteable, ToXContentFragment { private final ProtobufTaskId taskId; private final String type; @@ -108,7 +105,7 @@ public ProtobufTaskInfo(CodedInputStream in) throws IOException { type = in.readString(); action = in.readString(); description = protobufStreamInput.readOptionalString(in); - //TODO: fix this + // TODO: fix this status = null; startTime = in.readInt64(); runningTimeNanos = in.readInt64(); @@ -134,21 +131,21 @@ public ProtobufTaskInfo(CodedInputStream in) throws IOException { public void writeTo(CodedOutputStream out) throws IOException { protobufStreamOutput = new ProtobufStreamOutput(); taskId.writeTo(out); - out.writeString(1, type); - out.writeString(2, action); - out.writeString(3, description); - //TODO: fix this + out.writeStringNoTag(type); + out.writeStringNoTag(action); + out.writeStringNoTag(description); + // TODO: fix this // out.writeOptionalNamedWriteable(status); - out.writeInt64(4, startTime); - out.writeInt64(5, runningTimeNanos); - out.writeBool(6, cancellable); + out.writeInt64NoTag(startTime); + out.writeInt64NoTag(runningTimeNanos); + out.writeBoolNoTag(cancellable); if (protobufStreamOutput.getVersion().onOrAfter(Version.V_2_0_0)) { - out.writeBool(7, cancelled); + out.writeBoolNoTag(cancelled); } parentTaskId.writeTo(out); - protobufStreamOutput.writeMap(headers, CodedOutputStream::writeString, CodedOutputStream::writeString, out); + protobufStreamOutput.writeMap(headers, CodedOutputStream::writeStringNoTag, CodedOutputStream::writeStringNoTag, out); if (protobufStreamOutput.getVersion().onOrAfter(Version.V_2_1_0)) { - out.writeOptionalWriteable(resourceStats, out); + protobufStreamOutput.writeOptionalWriteable(resourceStats, out); } } @@ -228,4 +225,39 @@ public Map getHeaders() { public ProtobufTaskResourceStats getResourceStats() { return resourceStats; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("node", taskId.getNodeId()); + builder.field("id", taskId.getId()); + builder.field("type", type); + builder.field("action", action); + if (status != null) { + builder.field("status", status, params); + } + if (description != null) { + builder.field("description", description); + } + builder.timeField("start_time_in_millis", "start_time", startTime); + if (builder.humanReadable()) { + builder.field("running_time", new TimeValue(runningTimeNanos, TimeUnit.NANOSECONDS).toString()); + } + builder.field("running_time_in_nanos", runningTimeNanos); + builder.field("cancellable", cancellable); + builder.field("cancelled", cancelled); + if (parentTaskId.isSet()) { + builder.field("parent_task_id", parentTaskId.toString()); + } + builder.startObject("headers"); + for (Map.Entry attribute : headers.entrySet()) { + builder.field(attribute.getKey(), attribute.getValue()); + } + builder.endObject(); + if (resourceStats != null) { + builder.startObject("resource_stats"); + resourceStats.toXContent(builder, params); + builder.endObject(); + } + return builder; + } } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java index 30de39551c335..61220be1026a9 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java 
@@ -4,20 +4,23 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. */ package org.opensearch.tasks; import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.proto.TaskResourceStatsProto; import com.google.protobuf.CodedOutputStream; import com.google.protobuf.CodedInputStream; import java.io.IOException; -import java.util.HashMap; import java.util.Map; -import java.util.Objects; /** * Resource information about a currently running task. @@ -27,9 +30,13 @@ * * @opensearch.internal */ -public class ProtobufTaskResourceStats implements ProtobufWriteable { +public class ProtobufTaskResourceStats implements ProtobufWriteable, ToXContentFragment { private final TaskResourceStatsProto.TaskResourceStats taskResourceStats; + public ProtobufTaskResourceStats(Map resourceUsage) { + this.taskResourceStats = TaskResourceStatsProto.TaskResourceStats.newBuilder().putAllResourceUsage(resourceUsage).build(); + } + /** * Read from a stream. */ @@ -45,4 +52,19 @@ public Map g public void writeTo(CodedOutputStream out) throws IOException { this.taskResourceStats.writeTo(out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Map resourceUsage = this.taskResourceStats + .getResourceUsageMap(); + for (Map.Entry resourceUsageEntry : resourceUsage.entrySet()) { + builder.startObject(resourceUsageEntry.getKey()); + if (resourceUsageEntry.getValue() != null) { + builder.field("cpu_time_in_nanos", resourceUsageEntry.getValue().getCpuTimeInNanos()); + builder.field("memory_in_bytes", resourceUsageEntry.getValue().getMemoryInBytes()); + } + builder.endObject(); + } + return builder; + } } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java index c3314ae40455f..d4a2402961cc1 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java @@ -4,41 +4,36 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
*/ package org.opensearch.tasks; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.opensearch.OpenSearchException; import org.opensearch.client.Requests; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.io.stream.Writeable; -import org.opensearch.core.xcontent.InstantiatingObjectParser; -import org.opensearch.common.xcontent.ObjectParserHelper; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Map; -import java.util.Objects; import static java.util.Collections.emptyMap; import static java.util.Objects.requireNonNull; -import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; -import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.common.xcontent.XContentHelper.convertToMap; /** - * Information about a running task or a task that stored its result. Running tasks just have a {@link #getTask()} while +* Information about a running task or a task that stored its result. Running tasks just have a {@link #getTask()} while * tasks with stored result will have either a {@link #getError()} or {@link #getResponse()}. * * @opensearch.internal @@ -51,6 +46,8 @@ public final class ProtobufTaskResult implements ProtobufWriteable, ToXContentOb @Nullable private final BytesReference response; + private ProtobufStreamInput protobufStreamInput; + /** * Construct a {@linkplain TaskResult} for a task for which we don't have a result or error. That usually means that the task * is incomplete, but it could also mean that we waited for the task to complete but it didn't save any error information. @@ -60,14 +57,14 @@ public ProtobufTaskResult(boolean completed, ProtobufTaskInfo task) { } /** - * Construct a {@linkplain TaskResult} for a task that completed with an error. + * Construct a {@linkplain TaskResult} for a task that completed with an error. */ public ProtobufTaskResult(ProtobufTaskInfo task, Exception error) throws IOException { this(true, task, toXContent(error), null); } /** - * Construct a {@linkplain ProtobufTaskResult} for a task that completed successfully. + * Construct a {@linkplain ProtobufTaskResult} for a task that completed successfully. */ public ProtobufTaskResult(ProtobufTaskInfo task, ToXContent response) throws IOException { this(true, task, null, XContentHelper.toXContent(response, Requests.INDEX_CONTENT_TYPE, true)); @@ -81,32 +78,33 @@ public ProtobufTaskResult(boolean completed, ProtobufTaskInfo task, @Nullable By } /** - * Read from a stream. + * Read from a stream. 
*/ - public ProtobufTaskResult(com.google.protobuf.CodedInputStream in) throws IOException { + public ProtobufTaskResult(CodedInputStream in) throws IOException { + protobufStreamInput = new ProtobufStreamInput(); completed = in.readBool(); task = new ProtobufTaskInfo(in); - error = in.readOptionalBytesReference(); - response = in.readOptionalBytesReference(); + error = protobufStreamInput.readOptionalBytesReference(in); + response = protobufStreamInput.readOptionalBytesReference(in); } @Override - public void writeTo(com.google.protobuf.CodedOutputStream out) throws IOException { - out.writeBool(0, completed); + public void writeTo(CodedOutputStream out) throws IOException { + out.writeBoolNoTag(completed); task.writeTo(out); - out.writeByteArray(1, error); - out.writeOptionalBytesReference(response); + out.writeByteArrayNoTag(BytesReference.toBytes(error)); + out.writeByteArrayNoTag(BytesReference.toBytes(response)); } /** - * Get the task that this wraps. + * Get the task that this wraps. */ public ProtobufTaskInfo getTask() { return task; } /** - * Get the error that finished this task. Will return null if the task didn't finish with an error, it hasn't yet finished, or didn't + * Get the error that finished this task. Will return null if the task didn't finish with an error, it hasn't yet finished, or didn't * store its result. */ public BytesReference getError() { @@ -114,7 +112,7 @@ public BytesReference getError() { } /** - * Convert {@link #getError()} from XContent to a Map for easy processing. Will return an empty map if the task didn't finish with an + * Convert {@link #getError()} from XContent to a Map for easy processing. Will return an empty map if the task didn't finish with an * error, hasn't yet finished, or didn't store its result. */ public Map getErrorAsMap() { @@ -125,7 +123,7 @@ public Map getErrorAsMap() { } /** - * Get the response that this task finished with. Will return null if the task was finished by an error, it hasn't yet finished, or + * Get the response that this task finished with. Will return null if the task was finished by an error, it hasn't yet finished, or * didn't store its result. */ public BytesReference getResponse() { @@ -133,7 +131,7 @@ public BytesReference getResponse() { } /** - * Convert {@link #getResponse()} from XContent to a Map for easy processing. Will return an empty map if the task was finished with an + * Convert {@link #getResponse()} from XContent to a Map for easy processing. Will return an empty map if the task was finished with an * error, hasn't yet finished, or didn't store its result. 
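+     * For example (hypothetical):
+     * <pre>
+     * Map&lt;String, Object&gt; response = taskResult.getResponseAsMap();
+     * </pre>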
*/ public Map getResponseAsMap() { @@ -168,53 +166,6 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t return builder; } - public static final InstantiatingObjectParser PARSER; - - static { - InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( - "stored_task_result", - true, - ProtobufTaskResult.class - ); - parser.declareBoolean(constructorArg(), new ParseField("completed")); - parser.declareObject(constructorArg(), ProtobufTaskInfo.PARSER, new ParseField("task")); - ObjectParserHelper parserHelper = new ObjectParserHelper<>(); - parserHelper.declareRawObject(parser, optionalConstructorArg(), new ParseField("error")); - parserHelper.declareRawObject(parser, optionalConstructorArg(), new ParseField("response")); - PARSER = parser.build(); - } - - @Override - public String toString() { - return Strings.toString(XContentType.JSON, this); - } - - // Implements equals and hashcode for testing - @Override - public boolean equals(Object obj) { - if (obj == null || obj.getClass() != ProtobufTaskResult.class) { - return false; - } - ProtobufTaskResult other = (ProtobufTaskResult) obj; - /* - * Equality of error and result is done by converting them to a map first. Not efficient but ignores field order and spacing - * differences so perfect for testing. - */ - return Objects.equals(completed, other.completed) - && Objects.equals(task, other.task) - && Objects.equals(getErrorAsMap(), other.getErrorAsMap()) - && Objects.equals(getResponseAsMap(), other.getResponseAsMap()); - } - - @Override - public int hashCode() { - /* - * Hashing of error and result is done by converting them to a map first. Not efficient but ignores field order and spacing - * differences so perfect for testing. - */ - return Objects.hash(completed, task, getErrorAsMap(), getResponseAsMap()); - } - private static BytesReference toXContent(Exception error) throws IOException { try (XContentBuilder builder = XContentFactory.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { builder.startObject(); diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java index 0bca490774ec4..0ec5f5cb5de9c 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessage.java @@ -1,10 +1,13 @@ /* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ package org.opensearch.transport; @@ -38,6 +41,5 @@ public ProtobufTransportMessage() {} * Constructs a new transport message with the data from the {@link CodedInputStream}. 
This is * currently a no-op */ - public ProtobufTransportMessage(com.google.protobuf.CodedInputStream in) {} + public ProtobufTransportMessage(CodedInputStream in) {} } - \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java index ec78e3cdb2bf1..3511655702a89 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequest.java @@ -4,14 +4,15 @@ * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. */ package org.opensearch.transport; import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.tasks.ProtobufTaskId; import org.opensearch.tasks.ProtobufTaskAwareRequest; import org.opensearch.tasks.TaskId; @@ -32,7 +33,7 @@ public abstract class ProtobufTransportRequest extends ProtobufTransportMessage public ProtobufTransportRequest() {} - public ProtobufTransportRequest(com.google.protobuf.CodedInputStream in) throws IOException { + public ProtobufTransportRequest(CodedInputStream in) throws IOException { parentTaskId = ProtobufTaskId.readFromStream(in); } @@ -53,7 +54,7 @@ public ProtobufTaskId getParentTask() { } @Override - public void writeTo(com.google.protobuf.CodedOutputStream out) throws IOException { + public void writeTo(CodedOutputStream out) throws IOException { parentTaskId.writeTo(out); } } From 551fe82c4768fc483098edd94cde68d60d03f6da Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 8 May 2023 06:40:51 +0000 Subject: [PATCH 07/37] Adding protobuf integrations for client, transport, request Signed-off-by: Vacha Shah --- .../src/main/java/org/opensearch/Build.java | 50 + .../src/main/java/org/opensearch/Version.java | 5 + .../ProtobufOpenSearchException.java | 45 + .../action/ProtobufActionRequest.java | 53 + .../action/ProtobufActionRequestBuilder.java | 67 + .../action/ProtobufActionResponse.java | 33 + .../opensearch/action/ProtobufActionType.java | 65 + .../action/ProtobufFailedNodeException.java | 46 + .../cluster/node/info/ProtobufNodeInfo.java | 335 ++++ .../node/info/ProtobufNodesInfoRequest.java | 165 ++ .../node/info/ProtobufNodesInfoResponse.java | 76 + .../node/info/ProtobufPluginsAndModules.java | 77 + .../node/stats/ProtobufNodesStatsRequest.java | 202 +++ .../state/ProtobufClusterStateAction.java | 27 + .../state/ProtobufClusterStateRequest.java | 199 +++ .../ProtobufClusterStateRequestBuilder.java | 119 ++ .../state/ProtobufClusterStateResponse.java | 85 + .../stats/ProtobufCommonStatsFlags.java | 279 +++ .../action/support/IndicesOptions.java | 18 + .../action/support/ProtobufActionFilter.java | 67 + .../support/ProtobufActionFilterChain.java | 30 + .../action/support/ProtobufActionFilters.java | 40 + .../support/ProtobufTransportAction.java | 245 +++ ...terManagerNodeOperationRequestBuilder.java | 76 + ...anagerNodeReadOperationRequestBuilder.java | 39 + ...ProtobufClusterManagerNodeReadRequest.java | 58 + .../ProtobufClusterManagerNodeRequest.java | 108 ++ .../nodes/ProtobufBaseNodeResponse.java | 53 + .../nodes/ProtobufBaseNodesRequest.java | 119 ++ 
.../nodes/ProtobufBaseNodesResponse.java | 133 ++ .../opensearch/client/ClusterAdminClient.java | 4 + .../client/ProtobufAdminClient.java | 48 + .../org/opensearch/client/ProtobufClient.java | 32 + .../client/ProtobufClusterAdminClient.java | 220 +++ .../client/ProtobufOpenSearchClient.java | 63 + .../client/node/ProtobufNodeClient.java | 139 ++ .../client/support/AbstractClient.java | 19 + .../support/ProtobufAbstractClient.java | 581 ++++++ .../cluster/ProtobufClusterChangedEvent.java | 307 ++++ .../cluster/ProtobufClusterName.java | 61 + .../cluster/ProtobufClusterState.java | 803 +++++++++ .../cluster/ProtobufClusterStateApplier.java | 29 + .../org/opensearch/cluster/ProtobufDiff.java | 29 + .../opensearch/cluster/ProtobufDiffable.java | 30 + .../cluster/ProtobufDiffableUtils.java | 812 +++++++++ .../opensearch/cluster/ProtobufNamedDiff.java | 32 + .../cluster/ProtobufNamedDiffable.java | 24 + .../ProtobufNamedDiffableValueSerializer.java | 59 + .../cluster/node/ProtobufDiscoveryNode.java | 518 ++++++ .../cluster/node/ProtobufDiscoveryNodes.java | 886 ++++++++++ .../cluster/routing/RoutingNodes.java | 59 + .../io/stream/ProtobufNamedWriteable.java | 29 + .../common/io/stream/ProtobufStreamInput.java | 155 ++ .../io/stream/ProtobufStreamOutput.java | 120 ++ .../ProtobufVersionedNamedWriteable.java | 34 + .../opensearch/common/settings/Settings.java | 29 + .../ProtobufBoundTransportAddress.java | 68 + .../transport/ProtobufTransportAddress.java | 108 ++ .../common/unit/ProtobufSizeValue.java | 136 ++ .../common/util/concurrent/ThreadContext.java | 48 +- .../org/opensearch/http/ProtobufHttpInfo.java | 64 + .../opensearch/ingest/ProtobufIngestInfo.java | 59 + .../ingest/ProtobufProcessorInfo.java | 48 + .../monitor/jvm/ProtobufJvmInfo.java | 571 ++++++ .../opensearch/monitor/os/ProtobufOsInfo.java | 103 ++ .../monitor/process/ProtobufProcessInfo.java | 65 + .../node/ProtobufReportingService.java | 29 + .../plugins/ProtobufPluginInfo.java | 350 ++++ .../org/opensearch/rest/BaseRestHandler.java | 29 + .../rest/action/cat/RestNodesAction.java | 36 +- .../support/ProtobufAggregationInfo.java | 71 + .../pipeline/ProtobufSearchPipelineInfo.java | 56 + .../org/opensearch/tasks/ProtobufTask.java | 8 +- .../ProtobufTaskCancellationService.java | 236 +++ .../opensearch/tasks/ProtobufTaskManager.java | 752 ++++++++ .../ProtobufTaskResourceTrackingService.java | 270 +++ .../opensearch/tasks/TaskResultsService.java | 58 + .../threadpool/ProtobufExecutorBuilder.java | 91 + .../ProtobufFixedExecutorBuilder.java | 182 ++ .../ProtobufResizableExecutorBuilder.java | 134 ++ .../ProtobufScalingExecutorBuilder.java | 141 ++ .../threadpool/ProtobufThreadPool.java | 860 +++++++++ .../threadpool/ProtobufThreadPoolInfo.java | 51 + .../transport/ProtobufNetworkMessage.java | 62 + .../transport/ProtobufOutboundMessage.java | 191 ++ .../ProtobufRequestHandlerRegistry.java | 125 ++ .../ProtobufTaskTransportChannel.java | 67 + .../ProtobufTcpTransportChannel.java | 112 ++ .../transport/ProtobufTransport.java | 274 +++ .../transport/ProtobufTransportChannel.java | 56 + .../transport/ProtobufTransportInfo.java | 138 ++ .../ProtobufTransportMessageListener.java | 69 + .../ProtobufTransportRequestHandler.java | 22 + .../transport/ProtobufTransportResponse.java | 59 + .../transport/ProtobufTransportService.java | 1557 +++++++++++++++++ 95 files changed, 15372 insertions(+), 20 deletions(-) create mode 100644 server/src/main/java/org/opensearch/ProtobufOpenSearchException.java create mode 100644 
server/src/main/java/org/opensearch/action/ProtobufActionRequest.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionResponse.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionType.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufFailedNodeException.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufActionFilter.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufActionFilterChain.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufActionFilters.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeOperationRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeReadOperationRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeReadRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesResponse.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufAdminClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java create mode 100644 server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java create mode 100644 server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java create mode 100644 
server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufDiff.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufDiffable.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufNamedDiff.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffable.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffableValueSerializer.java create mode 100644 server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNode.java create mode 100644 server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/ProtobufNamedWriteable.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/ProtobufVersionedNamedWriteable.java create mode 100644 server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java create mode 100644 server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java create mode 100644 server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java create mode 100644 server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java create mode 100644 server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java create mode 100644 server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java create mode 100644 server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java create mode 100644 server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java create mode 100644 server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java create mode 100644 server/src/main/java/org/opensearch/node/ProtobufReportingService.java create mode 100644 server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java create mode 100644 server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java create mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java create mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java create mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java create mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java create mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java create mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java create mode 100644 
server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTaskTransportChannel.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransport.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportChannel.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportResponse.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportService.java diff --git a/libs/core/src/main/java/org/opensearch/Build.java b/libs/core/src/main/java/org/opensearch/Build.java index 67a50a8a31a0e..8a2d3e94bc0ec 100644 --- a/libs/core/src/main/java/org/opensearch/Build.java +++ b/libs/core/src/main/java/org/opensearch/Build.java @@ -32,6 +32,8 @@ package org.opensearch; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.opensearch.common.Booleans; import org.opensearch.core.util.FileSystemUtils; @@ -206,6 +208,54 @@ public String date() { return date; } + public static Build readBuild(StreamInput in) throws IOException { + // the following is new for opensearch: we write the distribution to support any "forks" + final String distribution = in.readString(); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + final Type type = Type.fromDisplayName(in.readString(), false); + String hash = in.readString(); + String date = in.readString(); + boolean snapshot = in.readBoolean(); + final String version = in.readString(); + return new Build(type, hash, date, snapshot, version, distribution); + } + + public static Build readBuildProtobuf(CodedInputStream in) throws IOException { + // the following is new for opensearch: we write the distribution to support any "forks" + final String distribution = in.readString(); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + final Type type = Type.fromDisplayName(in.readString(), false); + String hash = in.readString(); + String date = in.readString(); + boolean snapshot = in.readBool(); + final String version = in.readString(); + return new Build(type, hash, date, snapshot, version, distribution); + } + + public static void writeBuild(Build build, StreamOutput out) throws IOException { + // the following is new for opensearch: we write the distribution name to support any "forks" of the code + out.writeString(build.distribution); + + final Type buildType = build.type(); + out.writeString(buildType.displayName()); + out.writeString(build.hash()); + out.writeString(build.date()); + out.writeBoolean(build.isSnapshot()); + out.writeString(build.getQualifiedVersion()); + } + + public static void writeBuildProtobuf(Build build, CodedOutputStream out) throws IOException { + // the following is new for opensearch: we write the distribution name to support any "forks" of the code + out.writeStringNoTag(build.distribution); + + final Type buildType = build.type(); + 
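+        // the *NoTag variants below write bare values without protobuf field tags, so the read side
+        // (readBuildProtobuf above) must consume the fields in exactly this order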
out.writeStringNoTag(buildType.displayName()); + out.writeStringNoTag(build.hash()); + out.writeStringNoTag(build.date()); + out.writeBoolNoTag(build.isSnapshot()); + out.writeStringNoTag(build.getQualifiedVersion()); + } + /** * Get the distribution name (expected to be OpenSearch; empty if legacy; something else if forked) * @return distribution name as a string diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 3f83282245fd8..b1cba4f2b69fc 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -32,6 +32,7 @@ package org.opensearch; +import com.google.protobuf.CodedInputStream; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -95,6 +96,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version CURRENT = V_3_0_0; + public static Version readVersionProtobuf(CodedInputStream in) throws IOException { + return fromId(in.readInt32()); + } + public static Version fromId(int id) { final Version known = LegacyESVersion.idToVersion.get(id); if (known != null) { diff --git a/server/src/main/java/org/opensearch/ProtobufOpenSearchException.java b/server/src/main/java/org/opensearch/ProtobufOpenSearchException.java new file mode 100644 index 0000000000000..30c9e8be2cfb6 --- /dev/null +++ b/server/src/main/java/org/opensearch/ProtobufOpenSearchException.java @@ -0,0 +1,45 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; + +import java.io.IOException; + +/** + * Base exception for a failed node +* +* @opensearch.internal +*/ +public class ProtobufOpenSearchException extends RuntimeException implements ProtobufWriteable { + + private String message; + + public ProtobufOpenSearchException(String message) { + super(message); + this.message = message; + } + + public ProtobufOpenSearchException(CodedInputStream in) throws IOException { + super(in.readString()); + this.message = in.readString(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeStringNoTag(this.getMessage()); + } + + public String getMessage() { + return this.message; + } + +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionRequest.java b/server/src/main/java/org/opensearch/action/ProtobufActionRequest.java new file mode 100644 index 0000000000000..013ac5cacb776 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionRequest.java @@ -0,0 +1,53 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
+*/ + +package org.opensearch.action; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.transport.ProtobufTransportRequest; + +import java.io.IOException; + +/** + * Base action request implemented by plugins. +* +* @opensearch.api +*/ +public abstract class ProtobufActionRequest extends ProtobufTransportRequest { + + public ProtobufActionRequest() { + super(); + // this does not set the listenerThreaded API, if needed, its up to the caller to set it + // since most times, we actually want it to not be threaded... + // this.listenerThreaded = request.listenerThreaded(); + } + + public ProtobufActionRequest(CodedInputStream in) throws IOException { + super(in); + } + + public abstract ActionRequestValidationException validate(); + + /** + * Should this task store its result after it has finished? + */ + public boolean getShouldStoreResult() { + return false; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java new file mode 100644 index 0000000000000..33c0e46f5fc92 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java @@ -0,0 +1,67 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import org.opensearch.client.OpenSearchClient; +import org.opensearch.client.ProtobufOpenSearchClient; +import org.opensearch.common.unit.TimeValue; + +import java.util.Objects; + +/** + * Base Action Request Builder +* +* @opensearch.api +*/ +public abstract class ProtobufActionRequestBuilder { + + protected final ProtobufActionType action; + protected final Request request; + protected final ProtobufOpenSearchClient client; + + protected ProtobufActionRequestBuilder(ProtobufOpenSearchClient client, ProtobufActionType action, Request request) { + Objects.requireNonNull(action, "action must not be null"); + this.action = action; + this.request = request; + this.client = client; + } + + public Request request() { + return this.request; + } + + public ActionFuture execute() { + return client.execute(action, request); + } + + /** + * Short version of execute().actionGet(). + */ + public Response get() { + return execute().actionGet(); + } + + /** + * Short version of execute().actionGet(). + */ + public Response get(TimeValue timeout) { + return execute().actionGet(timeout); + } + + /** + * Short version of execute().actionGet(). + */ + public Response get(String timeout) { + return execute().actionGet(timeout); + } + + public void execute(ActionListener listener) { + client.execute(action, request, listener); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionResponse.java b/server/src/main/java/org/opensearch/action/ProtobufActionResponse.java new file mode 100644 index 0000000000000..f6cf420cb86fd --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionResponse.java @@ -0,0 +1,33 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. 
See +* GitHub history for details. +*/ + +package org.opensearch.action; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.transport.ProtobufTransportResponse; + +import java.io.IOException; + +/** + * Base class for responses to action requests implemented by plugins. +* +* @opensearch.api +*/ +public abstract class ProtobufActionResponse extends ProtobufTransportResponse { + + public ProtobufActionResponse() {} + + public ProtobufActionResponse(CodedInputStream in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionType.java b/server/src/main/java/org/opensearch/action/ProtobufActionType.java new file mode 100644 index 0000000000000..911419fb67677 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionType.java @@ -0,0 +1,65 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.settings.Settings; +import org.opensearch.transport.TransportRequestOptions; + +/** + * A generic action. Should strive to make it a singleton. +* +* @opensearch.api +*/ +public class ProtobufActionType { + + private final String name; + private final ProtobufWriteable.Reader responseReader; + + /** + * @param name The name of the action, must be unique across actions. + * @param responseReader A reader for the response type + */ + public ProtobufActionType(String name, ProtobufWriteable.Reader responseReader) { + this.name = name; + this.responseReader = responseReader; + } + + /** + * The name of the action. Must be unique across actions. + */ + public String name() { + return this.name; + } + + /** + * Get a reader that can create a new instance of the class from a {@link org.opensearch.common.io.stream.StreamInput} + */ + public ProtobufWriteable.Reader getResponseReader() { + return responseReader; + } + + /** + * Optional request options for the action. + */ + public TransportRequestOptions transportOptions(Settings settings) { + return TransportRequestOptions.EMPTY; + } + + @Override + public boolean equals(Object o) { + return o instanceof ProtobufActionType && name.equals(((ProtobufActionType) o).name()); + } + + @Override + public int hashCode() { + return name.hashCode(); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufFailedNodeException.java b/server/src/main/java/org/opensearch/action/ProtobufFailedNodeException.java new file mode 100644 index 0000000000000..511d933391c00 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufFailedNodeException.java @@ -0,0 +1,46 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.action;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.ProtobufOpenSearchException;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+
+import java.io.IOException;
+
+/**
+ * Base exception for a failed node
+*
+* @opensearch.internal
+*/
+public class ProtobufFailedNodeException extends ProtobufOpenSearchException implements ProtobufWriteable {
+
+    private final String nodeId;
+
+    public ProtobufFailedNodeException(String nodeId, String msg, Throwable cause) {
+        super(msg, cause);
+        this.nodeId = nodeId;
+    }
+
+    public String nodeId() {
+        return this.nodeId;
+    }
+
+    public ProtobufFailedNodeException(CodedInputStream in) throws IOException {
+        super(in);
+        nodeId = in.readString();
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        super.writeTo(out);
+        out.writeStringNoTag(nodeId);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java
new file mode 100644
index 0000000000000..0d1b03ae29d07
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java
@@ -0,0 +1,335 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/
+
+package org.opensearch.action.admin.cluster.node.info;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.Build;
+import org.opensearch.Version;
+import org.opensearch.action.support.nodes.ProtobufBaseNodeResponse;
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.common.Nullable;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.ByteSizeValue;
+import org.opensearch.http.ProtobufHttpInfo;
+import org.opensearch.ingest.ProtobufIngestInfo;
+import org.opensearch.monitor.jvm.JvmInfo;
+import org.opensearch.monitor.jvm.ProtobufJvmInfo;
+import org.opensearch.monitor.os.OsInfo;
+import org.opensearch.monitor.os.ProtobufOsInfo;
+import org.opensearch.monitor.process.ProtobufProcessInfo;
+import org.opensearch.node.ProtobufReportingService;
+import org.opensearch.search.aggregations.support.ProtobufAggregationInfo;
+import org.opensearch.search.pipeline.ProtobufSearchPipelineInfo;
+import org.opensearch.threadpool.ProtobufThreadPoolInfo;
+import org.opensearch.transport.ProtobufTransportInfo;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Node information (static, does not change over time).
+*
+* @opensearch.internal
+*/
+public class ProtobufNodeInfo extends ProtobufBaseNodeResponse {
+
+    private Version version;
+    private Build build;
+
+    @Nullable
+    private Settings settings;
+
+    /**
+     * Do not expose this map to other classes. For type safety, use {@link #getInfo(Class)}
+     * to retrieve items from this map and {@link #addInfoIfNonNull(Class, ProtobufReportingService.ProtobufInfo)}
+     * to add items to it.
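+     * A minimal illustrative sketch of the lookup (assumes a populated instance):
+     * <pre>
+     * ProtobufJvmInfo jvm = nodeInfo.getInfo(ProtobufJvmInfo.class); // null if the section was absent
+     * </pre>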
+ */ + private Map, ProtobufReportingService.ProtobufInfo> infoMap = new HashMap<>(); + + @Nullable + private ByteSizeValue totalIndexingBuffer; + + public ProtobufNodeInfo(CodedInputStream in) throws IOException { + super(in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + version = Version.readVersionProtobuf(in); + build = Build.readBuildProtobuf(in); + if (in.readBool()) { + totalIndexingBuffer = new ByteSizeValue(in.readInt64()); + } else { + totalIndexingBuffer = null; + } + if (in.readBool()) { + settings = Settings.readSettingsFromStreamProtobuf(in); + } + addInfoIfNonNull(ProtobufOsInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufOsInfo::new, in)); + addInfoIfNonNull(ProtobufProcessInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufProcessInfo::new, in)); + addInfoIfNonNull(ProtobufJvmInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufJvmInfo::new, in)); + addInfoIfNonNull(ProtobufThreadPoolInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufThreadPoolInfo::new, in)); + addInfoIfNonNull(ProtobufTransportInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufTransportInfo::new, in)); + addInfoIfNonNull(ProtobufHttpInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufHttpInfo::new, in)); + addInfoIfNonNull(ProtobufPluginsAndModules.class, protobufStreamInput.readOptionalWriteable(ProtobufPluginsAndModules::new, in)); + addInfoIfNonNull(ProtobufIngestInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufIngestInfo::new, in)); + addInfoIfNonNull(ProtobufAggregationInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufAggregationInfo::new, in)); + if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_7_0)) { + addInfoIfNonNull( + ProtobufSearchPipelineInfo.class, + protobufStreamInput.readOptionalWriteable(ProtobufSearchPipelineInfo::new, in) + ); + } + } + + public ProtobufNodeInfo( + Version version, + Build build, + ProtobufDiscoveryNode node, + @Nullable Settings settings, + @Nullable ProtobufOsInfo os, + @Nullable ProtobufProcessInfo process, + @Nullable ProtobufJvmInfo jvm, + @Nullable ProtobufThreadPoolInfo threadPool, + @Nullable ProtobufTransportInfo transport, + @Nullable ProtobufHttpInfo http, + @Nullable ProtobufPluginsAndModules plugins, + @Nullable ProtobufIngestInfo ingest, + @Nullable ProtobufAggregationInfo aggsInfo, + @Nullable ByteSizeValue totalIndexingBuffer, + @Nullable ProtobufSearchPipelineInfo ProtobufSearchPipelineInfo + ) { + super(node); + this.version = version; + this.build = build; + this.settings = settings; + addInfoIfNonNull(ProtobufOsInfo.class, os); + addInfoIfNonNull(ProtobufProcessInfo.class, process); + addInfoIfNonNull(ProtobufJvmInfo.class, jvm); + addInfoIfNonNull(ProtobufThreadPoolInfo.class, threadPool); + addInfoIfNonNull(ProtobufTransportInfo.class, transport); + addInfoIfNonNull(ProtobufHttpInfo.class, http); + addInfoIfNonNull(ProtobufPluginsAndModules.class, plugins); + addInfoIfNonNull(ProtobufIngestInfo.class, ingest); + addInfoIfNonNull(ProtobufAggregationInfo.class, aggsInfo); + addInfoIfNonNull(ProtobufSearchPipelineInfo.class, ProtobufSearchPipelineInfo); + this.totalIndexingBuffer = totalIndexingBuffer; + } + + /** + * System's hostname. null in case of UnknownHostException + */ + @Nullable + public String getHostname() { + return getNode().getHostName(); + } + + /** + * The current OpenSearch version + */ + public Version getVersion() { + return version; + } + + /** + * The build version of the node. 
+ */ + public Build getBuild() { + return this.build; + } + + /** + * The settings of the node. + */ + @Nullable + public Settings getSettings() { + return this.settings; + } + + /** + * Get a particular info object, e.g. {@link JvmInfo} or {@link OsInfo}. This + * generic method handles all casting in order to spare client classes the + * work of explicit casts. This {@link NodeInfo} class guarantees type + * safety for these stored info blocks. + * + * @param clazz Class for retrieval. + * @param Specific subtype of ReportingService.ProtobufInfo to retrieve. + * @return An object of type T. + */ + public T getInfo(Class clazz) { + return clazz.cast(infoMap.get(clazz)); + } + + @Nullable + public ByteSizeValue getTotalIndexingBuffer() { + return totalIndexingBuffer; + } + + /** + * Add a value to the map of information blocks. This method guarantees the + * type safety of the storage of heterogeneous types of reporting service information. + */ + private void addInfoIfNonNull(Class clazz, T info) { + if (info != null) { + infoMap.put(clazz, info); + } + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + out.writeInt32NoTag(version.id); + Build.writeBuildProtobuf(build, out); + if (totalIndexingBuffer == null) { + out.writeBoolNoTag(false); + } else { + out.writeBoolNoTag(true); + out.writeInt64NoTag(totalIndexingBuffer.getBytes()); + } + if (settings == null) { + out.writeBoolNoTag(false); + } else { + out.writeBoolNoTag(true); + Settings.writeSettingsToStreamProtobuf(settings, out); + } + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufOsInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufProcessInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufJvmInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufThreadPoolInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufTransportInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufHttpInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufPluginsAndModules.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufIngestInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufAggregationInfo.class), out); + if (protobufStreamOutput.getVersion().onOrAfter(Version.V_2_7_0)) { + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufSearchPipelineInfo.class), out); + } + } + + public static ProtobufNodeInfo.Builder builder(Version version, Build build, ProtobufDiscoveryNode node) { + return new Builder(version, build, node); + } + + /** + * Builder class to accommodate new Info types being added to NodeInfo. 
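+     * An illustrative sketch of the intended usage ({@code node}, {@code settings} and
+     * {@code jvmInfo} are assumed to exist):
+     * <pre>
+     * ProtobufNodeInfo info = ProtobufNodeInfo.builder(Version.CURRENT, Build.CURRENT, node)
+     *     .setSettings(settings)
+     *     .setJvm(jvmInfo)
+     *     .build();
+     * </pre>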
+ */ + public static class Builder { + private final Version version; + private final Build build; + private final ProtobufDiscoveryNode node; + + private Builder(Version version, Build build, ProtobufDiscoveryNode node) { + this.version = version; + this.build = build; + this.node = node; + } + + private Settings settings; + private ProtobufOsInfo os; + private ProtobufProcessInfo process; + private ProtobufJvmInfo jvm; + private ProtobufThreadPoolInfo threadPool; + private ProtobufTransportInfo transport; + private ProtobufHttpInfo http; + private ProtobufPluginsAndModules plugins; + private ProtobufIngestInfo ingest; + private ProtobufAggregationInfo aggsInfo; + private ByteSizeValue totalIndexingBuffer; + private ProtobufSearchPipelineInfo ProtobufSearchPipelineInfo; + + public Builder setSettings(Settings settings) { + this.settings = settings; + return this; + } + + public Builder setOs(ProtobufOsInfo os) { + this.os = os; + return this; + } + + public Builder setProcess(ProtobufProcessInfo process) { + this.process = process; + return this; + } + + public Builder setJvm(ProtobufJvmInfo jvm) { + this.jvm = jvm; + return this; + } + + public Builder setThreadPool(ProtobufThreadPoolInfo threadPool) { + this.threadPool = threadPool; + return this; + } + + public Builder setTransport(ProtobufTransportInfo transport) { + this.transport = transport; + return this; + } + + public Builder setHttp(ProtobufHttpInfo http) { + this.http = http; + return this; + } + + public Builder setPlugins(ProtobufPluginsAndModules plugins) { + this.plugins = plugins; + return this; + } + + public Builder setIngest(ProtobufIngestInfo ingest) { + this.ingest = ingest; + return this; + } + + public Builder setAggsInfo(ProtobufAggregationInfo aggsInfo) { + this.aggsInfo = aggsInfo; + return this; + } + + public Builder setTotalIndexingBuffer(ByteSizeValue totalIndexingBuffer) { + this.totalIndexingBuffer = totalIndexingBuffer; + return this; + } + + public Builder setProtobufSearchPipelineInfo(ProtobufSearchPipelineInfo ProtobufSearchPipelineInfo) { + this.ProtobufSearchPipelineInfo = ProtobufSearchPipelineInfo; + return this; + } + + public ProtobufNodeInfo build() { + return new ProtobufNodeInfo( + version, + build, + node, + settings, + os, + process, + jvm, + threadPool, + transport, + http, + plugins, + ingest, + aggsInfo, + totalIndexingBuffer, + ProtobufSearchPipelineInfo + ); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java new file mode 100644 index 0000000000000..04de76bb281fe --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java @@ -0,0 +1,165 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */
+
+package org.opensearch.action.admin.cluster.node.info;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.action.support.nodes.ProtobufBaseNodesRequest;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+
+/**
+ * A request to get node (cluster) level information.
+ *
+ * @opensearch.internal
+ */
+public class ProtobufNodesInfoRequest extends ProtobufBaseNodesRequest<ProtobufNodesInfoRequest> {
+
+    private Set<String> requestedMetrics = Metric.allMetrics();
+
+    /**
+     * Create a new ProtobufNodesInfoRequest from a {@link com.google.protobuf.CodedInputStream} object.
+     *
+     * @param in A stream input object.
+     * @throws IOException if the stream cannot be deserialized.
+     */
+    public ProtobufNodesInfoRequest(CodedInputStream in) throws IOException {
+        super(in);
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput();
+        requestedMetrics.clear();
+        requestedMetrics.addAll(Arrays.asList(protobufStreamInput.readStringArray(in)));
+    }
+
+    /**
+     * Get information from nodes based on the nodes ids specified. If none are passed, information
+     * for all nodes will be returned.
+     */
+    public ProtobufNodesInfoRequest(String... nodesIds) {
+        super(nodesIds);
+        all();
+    }
+
+    /**
+     * Clears all info flags.
+     */
+    public ProtobufNodesInfoRequest clear() {
+        requestedMetrics.clear();
+        return this;
+    }
+
+    /**
+     * Sets to return all the data.
+     */
+    public ProtobufNodesInfoRequest all() {
+        requestedMetrics.addAll(Metric.allMetrics());
+        return this;
+    }
+
+    /**
+     * Get the names of requested metrics
+     */
+    public Set<String> requestedMetrics() {
+        return new HashSet<>(requestedMetrics);
+    }
+
+    /**
+     * Add metric
+     */
+    public ProtobufNodesInfoRequest addMetric(String metric) {
+        if (Metric.allMetrics().contains(metric) == false) {
+            throw new IllegalStateException("Used an illegal metric: " + metric);
+        }
+        requestedMetrics.add(metric);
+        return this;
+    }
+
+    /**
+     * Add multiple metrics
+     */
+    public ProtobufNodesInfoRequest addMetrics(String... metrics) {
+        SortedSet<String> metricsSet = new TreeSet<>(Arrays.asList(metrics));
+        if (Metric.allMetrics().containsAll(metricsSet) == false) {
+            metricsSet.removeAll(Metric.allMetrics());
+            String plural = metricsSet.size() == 1 ? "" : "s";
+            throw new IllegalStateException("Used illegal metric" + plural + ": " + metricsSet);
+        }
+        requestedMetrics.addAll(metricsSet);
+        return this;
+    }
+
+    /**
+     * Remove metric
+     */
+    public ProtobufNodesInfoRequest removeMetric(String metric) {
+        if (Metric.allMetrics().contains(metric) == false) {
+            throw new IllegalStateException("Used an illegal metric: " + metric);
+        }
+        requestedMetrics.remove(metric);
+        return this;
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        super.writeTo(out);
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput();
+        protobufStreamOutput.writeStringArray(requestedMetrics.toArray(new String[0]), out);
+    }
+
+    /**
+     * An enumeration of the "core" sections of metrics that may be requested
+     * from the nodes information endpoint. Eventually this list will be
+     * pluggable.
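+     * Metrics are requested by name, for example (an illustrative sketch; the node id is made up):
+     * <pre>
+     * ProtobufNodesInfoRequest request = new ProtobufNodesInfoRequest("node-1").clear()
+     *     .addMetric(Metric.JVM.metricName())
+     *     .addMetric(Metric.OS.metricName());
+     * </pre>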
+ */ + public enum Metric { + SETTINGS("settings"), + OS("os"), + PROCESS("process"), + JVM("jvm"), + THREAD_POOL("thread_pool"), + TRANSPORT("transport"), + HTTP("http"), + PLUGINS("plugins"), + INGEST("ingest"), + AGGREGATIONS("aggregations"), + INDICES("indices"), + SEARCH_PIPELINES("search_pipelines"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + + boolean containedIn(Set metricNames) { + return metricNames.contains(this.metricName()); + } + + public static Set allMetrics() { + return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java new file mode 100644 index 0000000000000..a7d5274fcf09a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java @@ -0,0 +1,76 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Licensed to Elasticsearch under one or more contributor +* license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright +* ownership. Elasticsearch licenses this file to you under +* the Apache License, Version 2.0 (the "License"); you may +* not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
+*/ + +package org.opensearch.action.admin.cluster.node.info; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.ProtobufFailedNodeException; +import org.opensearch.action.support.nodes.ProtobufBaseNodesResponse; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; + +import java.io.IOException; +import java.util.List; + +/** + * Transport response for OpenSearch Node Information +* +* @opensearch.internal +*/ +public class ProtobufNodesInfoResponse extends ProtobufBaseNodesResponse { + + public ProtobufNodesInfoResponse(CodedInputStream in) throws IOException { + super(in); + } + + public ProtobufNodesInfoResponse( + ProtobufClusterName clusterName, + List nodes, + List failures + ) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + return protobufStreamInput.readList(ProtobufNodeInfo::new, in); + } + + @Override + protected void writeNodesTo(CodedOutputStream out, List nodes) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + protobufStreamOutput.writeCollection(nodes, (o, v) -> v.writeTo(o), out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java new file mode 100644 index 0000000000000..1df8383a78736 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java @@ -0,0 +1,77 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.info; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.node.ProtobufReportingService; +import org.opensearch.plugins.ProtobufPluginInfo; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +/** + * Information about plugins and modules +* +* @opensearch.internal +*/ +public class ProtobufPluginsAndModules implements ProtobufReportingService.ProtobufInfo { + private final List plugins; + private final List modules; + + public ProtobufPluginsAndModules(List plugins, List modules) { + this.plugins = Collections.unmodifiableList(plugins); + this.modules = Collections.unmodifiableList(modules); + } + + public ProtobufPluginsAndModules(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + this.plugins = Collections.unmodifiableList(protobufStreamInput.readList(ProtobufPluginInfo::new, in)); + this.modules = Collections.unmodifiableList(protobufStreamInput.readList(ProtobufPluginInfo::new, in)); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + protobufStreamOutput.writeCollection(plugins, (o, v) -> v.writeTo(o), out); + protobufStreamOutput.writeCollection(modules, (o, v) -> v.writeTo(o), out); + } + + /** + * Returns an ordered list based on plugins name + */ + public List getPluginInfos() { + List plugins = new ArrayList<>(this.plugins); + Collections.sort(plugins, Comparator.comparing(ProtobufPluginInfo::getName)); + return plugins; + } + + /** + * Returns an ordered list based on modules name + */ + public List getModuleInfos() { + List modules = new ArrayList<>(this.modules); + Collections.sort(modules, Comparator.comparing(ProtobufPluginInfo::getName)); + return modules; + } + + public void addPlugin(ProtobufPluginInfo info) { + plugins.add(info); + } + + public void addModule(ProtobufPluginInfo info) { + modules.add(info); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java new file mode 100644 index 0000000000000..d55b511eb0170 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java @@ -0,0 +1,202 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.stats; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.admin.indices.stats.ProtobufCommonStatsFlags; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.action.support.nodes.ProtobufBaseNodesRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; + +/** + * A request to get node (cluster) level stats. +* +* @opensearch.internal +*/ +public class ProtobufNodesStatsRequest extends ProtobufBaseNodesRequest { + + private ProtobufCommonStatsFlags indices = new ProtobufCommonStatsFlags(); + private final Set requestedMetrics = new HashSet<>(); + + public ProtobufNodesStatsRequest() { + super((String[]) null); + } + + public ProtobufNodesStatsRequest(CodedInputStream in) throws IOException { + super(in); + + indices = new ProtobufCommonStatsFlags(in); + requestedMetrics.clear(); + requestedMetrics.addAll(in.readStringList()); + } + + /** + * Get stats from nodes based on the nodes ids specified. If none are passed, stats + * for all nodes will be returned. + */ + public ProtobufNodesStatsRequest(String... nodesIds) { + super(nodesIds); + } + + /** + * Sets all the request flags. + */ + public ProtobufNodesStatsRequest all() { + this.indices.all(); + this.requestedMetrics.addAll(Metric.allMetrics()); + return this; + } + + /** + * Clears all the request flags. + */ + public ProtobufNodesStatsRequest clear() { + this.indices.clear(); + this.requestedMetrics.clear(); + return this; + } + + /** + * Get indices. Handles separately from other metrics because it may or + * may not have submetrics. + * @return flags indicating which indices stats to return + */ + public ProtobufCommonStatsFlags indices() { + return indices; + } + + /** + * Set indices. Handles separately from other metrics because it may or + * may not involve submetrics. + * @param indices flags indicating which indices stats to return + * @return This object, for request chaining. + */ + public ProtobufNodesStatsRequest indices(ProtobufCommonStatsFlags indices) { + this.indices = indices; + return this; + } + + /** + * Should indices stats be returned. + */ + public ProtobufNodesStatsRequest indices(boolean indices) { + if (indices) { + this.indices.all(); + } else { + this.indices.clear(); + } + return this; + } + + /** + * Get the names of requested metrics, excluding indices, which are + * handled separately. + */ + public Set requestedMetrics() { + return new HashSet<>(requestedMetrics); + } + + /** + * Add metric + */ + public ProtobufNodesStatsRequest addMetric(String metric) { + if (Metric.allMetrics().contains(metric) == false) { + throw new IllegalStateException("Used an illegal metric: " + metric); + } + requestedMetrics.add(metric); + return this; + } + + /** + * Add an array of metric names + */ + public ProtobufNodesStatsRequest addMetrics(String... metrics) { + // use sorted set for reliable ordering in error messages + SortedSet metricsSet = new TreeSet<>(Arrays.asList(metrics)); + if (Metric.allMetrics().containsAll(metricsSet) == false) { + metricsSet.removeAll(Metric.allMetrics()); + String plural = metricsSet.size() == 1 ? 
"" : "s"; + throw new IllegalStateException("Used illegal metric" + plural + ": " + metricsSet); + } + requestedMetrics.addAll(metricsSet); + return this; + } + + /** + * Remove metric + */ + public ProtobufNodesStatsRequest removeMetric(String metric) { + if (Metric.allMetrics().contains(metric) == false) { + throw new IllegalStateException("Used an illegal metric: " + metric); + } + requestedMetrics.remove(metric); + return this; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + indices.writeTo(out); + out.writeStringArray(requestedMetrics.toArray(new String[0])); + } + + /** + * An enumeration of the "core" sections of metrics that may be requested + * from the nodes stats endpoint. Eventually this list will be pluggable. + */ + public enum Metric { + OS("os"), + PROCESS("process"), + JVM("jvm"), + THREAD_POOL("thread_pool"), + FS("fs"), + TRANSPORT("transport"), + HTTP("http"), + BREAKER("breaker"), + SCRIPT("script"), + DISCOVERY("discovery"), + INGEST("ingest"), + ADAPTIVE_SELECTION("adaptive_selection"), + SCRIPT_CACHE("script_cache"), + INDEXING_PRESSURE("indexing_pressure"), + SHARD_INDEXING_PRESSURE("shard_indexing_pressure"), + SEARCH_BACKPRESSURE("search_backpressure"), + CLUSTER_MANAGER_THROTTLING("cluster_manager_throttling"), + WEIGHTED_ROUTING_STATS("weighted_routing"), + FILE_CACHE_STATS("file_cache"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + + boolean containedIn(Set metricNames) { + return metricNames.contains(this.metricName()); + } + + static Set allMetrics() { + return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java new file mode 100644 index 0000000000000..5ab9aa023a298 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java @@ -0,0 +1,27 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.state; + +import org.opensearch.action.ActionType; +import org.opensearch.action.ProtobufActionType; + +/** + * Transport action for obtaining cluster state +* +* @opensearch.internal +*/ +public class ProtobufClusterStateAction extends ProtobufActionType { + + public static final ProtobufClusterStateAction INSTANCE = new ProtobufClusterStateAction(); + public static final String NAME = "cluster:monitor/state"; + + private ProtobufClusterStateAction() { + super(NAME, ProtobufClusterStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java new file mode 100644 index 0000000000000..6798c3b8c46e5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java @@ -0,0 +1,199 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.action.admin.cluster.state; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.ProtobufClusterManagerNodeReadRequest; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; + +/** + * Transport request for obtaining cluster state +* +* @opensearch.internal +*/ +public class ProtobufClusterStateRequest extends ProtobufClusterManagerNodeReadRequest + implements + IndicesRequest.Replaceable { + + public static final TimeValue DEFAULT_WAIT_FOR_NODE_TIMEOUT = TimeValue.timeValueMinutes(1); + + private boolean routingTable = true; + private boolean nodes = true; + private boolean metadata = true; + private boolean blocks = true; + private boolean customs = true; + private Long waitForMetadataVersion; + private TimeValue waitForTimeout = DEFAULT_WAIT_FOR_NODE_TIMEOUT; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.lenientExpandOpen(); + + public ProtobufClusterStateRequest() {} + + public ProtobufClusterStateRequest(CodedInputStream in) throws IOException { + super(in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + routingTable = in.readBool(); + nodes = in.readBool(); + metadata = in.readBool(); + blocks = in.readBool(); + customs = in.readBool(); + indices = protobufStreamInput.readStringArray(in); + indicesOptions = IndicesOptions.readIndicesOptionsProtobuf(in); + waitForTimeout = protobufStreamInput.readTimeValue(in); + waitForMetadataVersion = protobufStreamInput.readOptionalLong(in); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + out.writeBoolNoTag(routingTable); + out.writeBoolNoTag(nodes); + out.writeBoolNoTag(metadata); + out.writeBoolNoTag(blocks); + out.writeBoolNoTag(customs); + protobufStreamOutput.writeStringArray(indices, out); + indicesOptions.writeIndicesOptionsProtobuf(out); + protobufStreamOutput.writeTimeValue(waitForTimeout, out); + protobufStreamOutput.writeOptionalLong(waitForMetadataVersion, out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public ProtobufClusterStateRequest all() { + routingTable = true; + nodes = true; + metadata = true; + blocks = true; + customs = true; + indices = Strings.EMPTY_ARRAY; + return this; + } + + public ProtobufClusterStateRequest clear() { + routingTable = false; + nodes = false; + metadata = false; + blocks = false; + customs = false; + indices = Strings.EMPTY_ARRAY; + return this; + } + + public boolean routingTable() { + return routingTable; + } + + public ProtobufClusterStateRequest routingTable(boolean routingTable) { + this.routingTable = routingTable; + return this; + } + + public boolean nodes() { + return nodes; + } + + public ProtobufClusterStateRequest nodes(boolean nodes) { + this.nodes = nodes; + return this; + } + + public boolean metadata() { + return metadata; + } + + public 
ProtobufClusterStateRequest metadata(boolean metadata) { + this.metadata = metadata; + return this; + } + + public boolean blocks() { + return blocks; + } + + public ProtobufClusterStateRequest blocks(boolean blocks) { + this.blocks = blocks; + return this; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public ProtobufClusterStateRequest indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return this.indicesOptions; + } + + public final ProtobufClusterStateRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + @Override + public boolean includeDataStreams() { + return true; + } + + public ProtobufClusterStateRequest customs(boolean customs) { + this.customs = customs; + return this; + } + + public boolean customs() { + return customs; + } + + public TimeValue waitForTimeout() { + return waitForTimeout; + } + + public ProtobufClusterStateRequest waitForTimeout(TimeValue waitForTimeout) { + this.waitForTimeout = waitForTimeout; + return this; + } + + public Long waitForMetadataVersion() { + return waitForMetadataVersion; + } + + public ProtobufClusterStateRequest waitForMetadataVersion(long waitForMetadataVersion) { + if (waitForMetadataVersion < 1) { + throw new IllegalArgumentException( + "provided waitForMetadataVersion should be >= 1, but instead is [" + waitForMetadataVersion + "]" + ); + } + this.waitForMetadataVersion = waitForMetadataVersion; + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java new file mode 100644 index 0000000000000..6255d47b9bd66 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java @@ -0,0 +1,119 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.state; + +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ProtobufClusterManagerNodeReadOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; +import org.opensearch.client.ProtobufOpenSearchClient; +import org.opensearch.common.unit.TimeValue; + +/** + * Transport request builder for obtaining cluster state +* +* @opensearch.internal +*/ +public class ProtobufClusterStateRequestBuilder extends ProtobufClusterManagerNodeReadOperationRequestBuilder< + ProtobufClusterStateRequest, + ProtobufClusterStateResponse, + ProtobufClusterStateRequestBuilder> { + + public ProtobufClusterStateRequestBuilder(ProtobufOpenSearchClient client, ProtobufClusterStateAction action) { + super(client, action, new ProtobufClusterStateRequest()); + } + + /** + * Include all data + */ + public ProtobufClusterStateRequestBuilder all() { + request.all(); + return this; + } + + /** + * Do not include any data + */ + public ProtobufClusterStateRequestBuilder clear() { + request.clear(); + return this; + } + + public ProtobufClusterStateRequestBuilder setBlocks(boolean filter) { + request.blocks(filter); + return this; + } + + /** + * Should the cluster state result include the {@link org.opensearch.cluster.metadata.Metadata}. Defaults + * to {@code true}. + */ + public ProtobufClusterStateRequestBuilder setMetadata(boolean filter) { + request.metadata(filter); + return this; + } + + /** + * Should the cluster state result include the {@link org.opensearch.cluster.node.DiscoveryNodes}. Defaults + * to {@code true}. + */ + public ProtobufClusterStateRequestBuilder setNodes(boolean filter) { + request.nodes(filter); + return this; + } + + /** + * Should the cluster state result include the {@link org.opensearch.cluster.ClusterState.Custom}. Defaults + * to {@code true}. + */ + public ProtobufClusterStateRequestBuilder setCustoms(boolean filter) { + request.customs(filter); + return this; + } + + /** + * Should the cluster state result include the {@link org.opensearch.cluster.routing.RoutingTable}. Defaults + * to {@code true}. + */ + public ProtobufClusterStateRequestBuilder setRoutingTable(boolean filter) { + request.routingTable(filter); + return this; + } + + /** + * When {@link #setMetadata(boolean)} is set, which indices to return the {@link org.opensearch.cluster.metadata.IndexMetadata} + * for. Defaults to all indices. + */ + public ProtobufClusterStateRequestBuilder setIndices(String... indices) { + request.indices(indices); + return this; + } + + public ProtobufClusterStateRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { + request.indicesOptions(indicesOptions); + return this; + } + + /** + * Causes the request to wait for the metadata version to advance to at least the given version. 
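+     * An illustrative sketch (assumes a Protobuf-aware client wired up as in this change):
+     * <pre>
+     * ProtobufClusterStateResponse response = client.admin().cluster().prepareState()
+     *     .setWaitForMetadataVersion(42L)
+     *     .setWaitForTimeOut(TimeValue.timeValueSeconds(30))
+     *     .get();
+     * </pre>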
+ * @param waitForMetadataVersion The metadata version for which to wait + */ + public ProtobufClusterStateRequestBuilder setWaitForMetadataVersion(long waitForMetadataVersion) { + request.waitForMetadataVersion(waitForMetadataVersion); + return this; + } + + /** + * If {@link ProtobufClusterStateRequest#waitForMetadataVersion()} is set then this determines how long to wait + */ + public ProtobufClusterStateRequestBuilder setWaitForTimeOut(TimeValue waitForTimeout) { + request.waitForTimeout(waitForTimeout); + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java new file mode 100644 index 0000000000000..a88e42d78c66a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java @@ -0,0 +1,85 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.action.admin.cluster.state; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.ProtobufClusterState; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; + +import java.io.IOException; + +/** + * The response for getting the cluster state. +* +* @opensearch.internal +*/ +public class ProtobufClusterStateResponse extends ProtobufActionResponse { + + private ProtobufClusterName clusterName; + private ProtobufClusterState clusterState; + private boolean waitForTimedOut = false; + + private final ProtobufStreamInput protobufStreamInput; + + public ProtobufClusterStateResponse(CodedInputStream in) throws IOException { + super(in); + protobufStreamInput = new ProtobufStreamInput(); + clusterName = new ProtobufClusterName(in); + clusterState = protobufStreamInput.readOptionalWriteable(innerIn -> ProtobufClusterState.readFrom(innerIn, null), in); + waitForTimedOut = in.readBool(); + } + + public ProtobufClusterStateResponse(ProtobufClusterName clusterName, ProtobufClusterState clusterState, boolean waitForTimedOut) { + this.clusterName = clusterName; + this.clusterState = clusterState; + this.waitForTimedOut = waitForTimedOut; + protobufStreamInput = new ProtobufStreamInput(); + } + + /** + * The requested cluster state. Only the parts of the cluster state that were + * requested are included in the returned {@link ClusterState} instance. + */ + public ProtobufClusterState getState() { + return this.clusterState; + } + + /** + * The name of the cluster. + */ + public ProtobufClusterName getClusterName() { + return this.clusterName; + } + + /** + * Returns whether the request timed out waiting for a cluster state with a metadata version equal or + * higher than the specified metadata. 
+     */
+    public boolean isWaitForTimedOut() {
+        return waitForTimedOut;
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput();
+        clusterName.writeTo(out);
+        protobufStreamOutput.writeOptionalWriteable(clusterState, out);
+        out.writeBoolNoTag(waitForTimedOut);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java
new file mode 100644
index 0000000000000..82df111cceda4
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java
@@ -0,0 +1,279 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.admin.indices.stats;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.Version;
+import org.opensearch.common.Strings;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.EnumSet;
+
+/**
+ * Common Stats Flags for OpenSearch
+*
+* @opensearch.internal
+*/
+public class ProtobufCommonStatsFlags implements ProtobufWriteable, Cloneable {
+
+    public static final ProtobufCommonStatsFlags ALL = new ProtobufCommonStatsFlags().all();
+    public static final ProtobufCommonStatsFlags NONE = new ProtobufCommonStatsFlags().clear();
+
+    private EnumSet<Flag> flags = EnumSet.allOf(Flag.class);
+    private String[] groups = null;
+    private String[] fieldDataFields = null;
+    private String[] completionDataFields = null;
+    private boolean includeSegmentFileSizes = false;
+    private boolean includeUnloadedSegments = false;
+    private boolean includeAllShardIndexingPressureTrackers = false;
+    private boolean includeOnlyTopIndexingPressureMetrics = false;
+
+    /**
+     * @param flags flags to set. If no flags are supplied, default flags will be set.
+     */
+    public ProtobufCommonStatsFlags(Flag... 
flags) { + if (flags.length > 0) { + clear(); + Collections.addAll(this.flags, flags); + } + } + + public ProtobufCommonStatsFlags(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + final long longFlags = in.readLong(); + flags.clear(); + for (Flag flag : Flag.values()) { + if ((longFlags & (1 << flag.getIndex())) != 0) { + flags.add(flag); + } + } + if (protobufStreamInput.getVersion().before(Version.V_2_0_0)) { + protobufStreamInput.readStringArray(in); + } + groups = protobufStreamInput.readStringArray(in); + fieldDataFields = protobufStreamInput.readStringArray(in); + completionDataFields = protobufStreamInput.readStringArray(in); + includeSegmentFileSizes = in.readBool(); + includeUnloadedSegments = in.readBool(); + includeAllShardIndexingPressureTrackers = in.readBool(); + includeOnlyTopIndexingPressureMetrics = in.readBool(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + long longFlags = 0; + for (Flag flag : flags) { + longFlags |= (1 << flag.getIndex()); + } + out.writeInt64NoTag(longFlags); + + if (protobufStreamOutput.getVersion().before(Version.V_2_0_0)) { + protobufStreamOutput.writeStringArrayNullable(Strings.EMPTY_ARRAY, out); + } + protobufStreamOutput.writeStringArrayNullable(groups, out); + protobufStreamOutput.writeStringArrayNullable(fieldDataFields, out); + protobufStreamOutput.writeStringArrayNullable(completionDataFields, out); + out.writeBoolNoTag(includeSegmentFileSizes); + out.writeBoolNoTag(includeUnloadedSegments); + out.writeBoolNoTag(includeAllShardIndexingPressureTrackers); + out.writeBoolNoTag(includeOnlyTopIndexingPressureMetrics); + } + + /** + * Sets all flags to return all stats. + */ + public ProtobufCommonStatsFlags all() { + flags = EnumSet.allOf(Flag.class); + groups = null; + fieldDataFields = null; + completionDataFields = null; + includeSegmentFileSizes = false; + includeUnloadedSegments = false; + includeAllShardIndexingPressureTrackers = false; + includeOnlyTopIndexingPressureMetrics = false; + return this; + } + + /** + * Clears all stats. + */ + public ProtobufCommonStatsFlags clear() { + flags = EnumSet.noneOf(Flag.class); + groups = null; + fieldDataFields = null; + completionDataFields = null; + includeSegmentFileSizes = false; + includeUnloadedSegments = false; + includeAllShardIndexingPressureTrackers = false; + includeOnlyTopIndexingPressureMetrics = false; + return this; + } + + public boolean anySet() { + return !flags.isEmpty(); + } + + public Flag[] getFlags() { + return flags.toArray(new Flag[0]); + } + + /** + * Sets specific search group stats to retrieve the stats for. Mainly affects search + * when enabled. + */ + public ProtobufCommonStatsFlags groups(String... groups) { + this.groups = groups; + return this; + } + + public String[] groups() { + return this.groups; + } + + /** + * Sets specific search group stats to retrieve the stats for. Mainly affects search + * when enabled. + */ + public ProtobufCommonStatsFlags fieldDataFields(String... fieldDataFields) { + this.fieldDataFields = fieldDataFields; + return this; + } + + public String[] fieldDataFields() { + return this.fieldDataFields; + } + + public ProtobufCommonStatsFlags completionDataFields(String... 
completionDataFields) { + this.completionDataFields = completionDataFields; + return this; + } + + public String[] completionDataFields() { + return this.completionDataFields; + } + + public ProtobufCommonStatsFlags includeSegmentFileSizes(boolean includeSegmentFileSizes) { + this.includeSegmentFileSizes = includeSegmentFileSizes; + return this; + } + + public ProtobufCommonStatsFlags includeUnloadedSegments(boolean includeUnloadedSegments) { + this.includeUnloadedSegments = includeUnloadedSegments; + return this; + } + + public ProtobufCommonStatsFlags includeAllShardIndexingPressureTrackers(boolean includeAllShardPressureTrackers) { + this.includeAllShardIndexingPressureTrackers = includeAllShardPressureTrackers; + return this; + } + + public ProtobufCommonStatsFlags includeOnlyTopIndexingPressureMetrics(boolean includeOnlyTopIndexingPressureMetrics) { + this.includeOnlyTopIndexingPressureMetrics = includeOnlyTopIndexingPressureMetrics; + return this; + } + + public boolean includeUnloadedSegments() { + return this.includeUnloadedSegments; + } + + public boolean includeAllShardIndexingPressureTrackers() { + return this.includeAllShardIndexingPressureTrackers; + } + + public boolean includeOnlyTopIndexingPressureMetrics() { + return this.includeOnlyTopIndexingPressureMetrics; + } + + public boolean includeSegmentFileSizes() { + return this.includeSegmentFileSizes; + } + + public boolean isSet(Flag flag) { + return flags.contains(flag); + } + + boolean unSet(Flag flag) { + return flags.remove(flag); + } + + void set(Flag flag) { + flags.add(flag); + } + + public ProtobufCommonStatsFlags set(Flag flag, boolean add) { + if (add) { + set(flag); + } else { + unSet(flag); + } + return this; + } + + @Override + public ProtobufCommonStatsFlags clone() { + try { + ProtobufCommonStatsFlags cloned = (ProtobufCommonStatsFlags) super.clone(); + cloned.flags = flags.clone(); + return cloned; + } catch (CloneNotSupportedException e) { + throw new AssertionError(e); + } + } + + /** + * The flags. 
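+     * Each flag owns a fixed bit position, so a set of flags round-trips through a single long:
+     * {@code longFlags |= (1 << flag.getIndex())} on write and
+     * {@code (longFlags & (1 << flag.getIndex())) != 0} on read, as in the constructor and
+     * {@code writeTo} above.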
+ * + * @opensearch.internal + */ + public enum Flag { + Store("store", 0), + Indexing("indexing", 1), + Get("get", 2), + Search("search", 3), + Merge("merge", 4), + Flush("flush", 5), + Refresh("refresh", 6), + QueryCache("query_cache", 7), + FieldData("fielddata", 8), + Docs("docs", 9), + Warmer("warmer", 10), + Completion("completion", 11), + Segments("segments", 12), + Translog("translog", 13), + // 14 was previously used for Suggest + RequestCache("request_cache", 15), + Recovery("recovery", 16); + + private final String restName; + private final int index; + + Flag(final String restName, final int index) { + this.restName = restName; + this.index = index; + } + + public String getRestName() { + return restName; + } + + private int getIndex() { + return index; + } + + } +} diff --git a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java index d30ee7e11bdfa..32da5e6002f55 100644 --- a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java @@ -31,8 +31,12 @@ package org.opensearch.action.support; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.opensearch.OpenSearchParseException; import org.opensearch.core.ParseField; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContent; @@ -278,12 +282,26 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { out.writeEnumSet(expandWildcards); } + public void writeIndicesOptionsProtobuf(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + EnumSet

+* All operations performed are asynchronous by nature. Each action/operation has two flavors, the first +* simply returns an {@link org.opensearch.action.ActionFuture}, while the second accepts an +* {@link ActionListener}. +*
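+* For example (an illustrative sketch; {@code action} and {@code request} stand for any matching
+* {@link org.opensearch.action.ProtobufActionType} and request pair):
+* <pre>
+* client.execute(action, request).actionGet();                             // blocking
+* client.execute(action, request, ActionListener.wrap(r -> {}, e -> {})); // async
+* </pre>
+*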

+* A client can be retrieved from a started {@link org.opensearch.node.Node}. +* +* @see org.opensearch.node.Node#client() +* +* @opensearch.internal +*/ +public interface ProtobufClient extends ProtobufOpenSearchClient { + + /** + * The admin client that can be used to perform administrative operations. + */ + ProtobufAdminClient admin(); +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java new file mode 100644 index 0000000000000..dfc638cc2a65c --- /dev/null +++ b/server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java @@ -0,0 +1,220 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.client; + +import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; +import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; +import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; +import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; +import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; +import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse; +import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import 
org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskResponse; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequest; +import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; +import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; +import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; +import 
org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequestBuilder; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequestBuilder; +import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequestBuilder; +import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; +import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest; +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; +import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; +import 
org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest;
+import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse;
+import org.opensearch.action.ingest.DeletePipelineRequest;
+import org.opensearch.action.ingest.DeletePipelineRequestBuilder;
+import org.opensearch.action.ingest.GetPipelineRequest;
+import org.opensearch.action.ingest.GetPipelineRequestBuilder;
+import org.opensearch.action.ingest.GetPipelineResponse;
+import org.opensearch.action.ingest.PutPipelineRequest;
+import org.opensearch.action.ingest.PutPipelineRequestBuilder;
+import org.opensearch.action.ingest.SimulatePipelineRequest;
+import org.opensearch.action.ingest.SimulatePipelineRequestBuilder;
+import org.opensearch.action.ingest.SimulatePipelineResponse;
+import org.opensearch.action.search.DeleteSearchPipelineRequest;
+import org.opensearch.action.search.GetSearchPipelineRequest;
+import org.opensearch.action.search.GetSearchPipelineResponse;
+import org.opensearch.action.search.PutSearchPipelineRequest;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.common.bytes.BytesReference;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.tasks.TaskId;
+
+/**
+ * Administrative actions/operations against the cluster.
+*
+* @see ProtobufAdminClient#cluster()
+*
+* @opensearch.internal
+*/
+public interface ProtobufClusterAdminClient extends ProtobufOpenSearchClient {
+
+    /**
+     * The state of the cluster.
+     *
+     * @param request The cluster state request.
+     * @return The result future
+     * @see Requests#clusterStateRequest()
+     */
+    ActionFuture state(ProtobufClusterStateRequest request);
+
+    /**
+     * The state of the cluster.
+     *
+     * @param request The cluster state request.
+     * @param listener A listener to be notified with a result
+     * @see Requests#clusterStateRequest()
+     */
+    void state(ProtobufClusterStateRequest request, ActionListener listener);
+
+    /**
+     * The state of the cluster.
+     */
+    ProtobufClusterStateRequestBuilder prepareState();
+
+    /**
+     * Nodes info of the cluster.
+     *
+     * @param request The nodes info request
+     * @return The result future
+     * @see org.opensearch.client.Requests#nodesInfoRequest(String...)
+     */
+    ActionFuture nodesInfo(ProtobufNodesInfoRequest request);
+
+    /**
+     * Nodes info of the cluster.
+     *
+     * @param request The nodes info request
+     * @param listener A listener to be notified with a result
+     * @see org.opensearch.client.Requests#nodesInfoRequest(String...)
+     */
+    void nodesInfo(ProtobufNodesInfoRequest request, ActionListener listener);
+
+    /**
+     * Nodes info of the cluster.
+     */
+    NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds);
+
+    // /**
+    // * Nodes stats of the cluster.
+    // *
+    // * @param request The nodes stats request
+    // * @return The result future
+    // * @see org.opensearch.client.Requests#nodesStatsRequest(String...)
+    // */
+    // ActionFuture nodesStats(NodesStatsRequest request);
+
+    // /**
+    // * Nodes stats of the cluster.
+    // *
+    // * @param request The nodes stats request
+    // * @param listener A listener to be notified with a result
+    // * @see org.opensearch.client.Requests#nodesStatsRequest(String...)
+    // */
+    // void nodesStats(NodesStatsRequest request, ActionListener listener);
+
+    // /**
+    // * Nodes stats of the cluster.
+    // */
+    // NodesStatsRequestBuilder prepareNodesStats(String... nodesIds);
+}
diff --git a/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java b/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java
new file mode 100644
index 0000000000000..a8db169724efb
--- /dev/null
+++ b/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java
@@ -0,0 +1,63 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.client;
+
+import org.opensearch.action.ActionType;
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.action.ProtobufActionType;
+import org.opensearch.action.ActionFuture;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionResponse;
+import org.opensearch.threadpool.ProtobufThreadPool;
+import org.opensearch.threadpool.ThreadPool;
+
+/**
+ * Interface for an OpenSearch client implementation.
+*
+* @opensearch.internal
+*/
+public interface ProtobufOpenSearchClient {
+
+    /**
+     * Executes a generic action, denoted by a {@link ProtobufActionType}.
+     *
+     * @param action The action type to execute.
+     * @param request The action request.
+     * @param The request type.
+     * @param The response type.
+     * @return A future allowing to get back the response.
+     */
+    ActionFuture execute(
+        ProtobufActionType action,
+        Request request
+    );
+
+    /**
+     * Executes a generic action, denoted by a {@link ProtobufActionType}.
+     *
+     * @param action The action type to execute.
+     * @param request The action request.
+     * @param listener The listener to receive the response back.
+     * @param The request type.
+     * @param The response type.
+     */
+    void execute(
+        ProtobufActionType action,
+        Request request,
+        ActionListener listener
+    );
+
+    /**
+     * Returns the thread pool used to execute requests on this client.
+     */
+    ProtobufThreadPool threadPool();
+
+}
diff --git a/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java b/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java
new file mode 100644
index 0000000000000..2368b98e2b5e0
--- /dev/null
+++ b/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java
@@ -0,0 +1,139 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.client.node;
+
+import org.opensearch.action.ActionType;
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.action.ProtobufActionType;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ActionModule.DynamicActionRegistry;
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionResponse;
+import org.opensearch.action.support.TransportAction;
+import org.opensearch.client.Client;
+import org.opensearch.client.support.AbstractClient;
+import org.opensearch.client.support.ProtobufAbstractClient;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.tasks.Task;
+import org.opensearch.tasks.TaskListener;
+import org.opensearch.threadpool.ProtobufThreadPool;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.RemoteClusterService;
+
+import java.util.function.Supplier;
+
+/**
+ * Client that executes actions on the local node.
+*
+* @opensearch.internal
+*/
+public class ProtobufNodeClient extends ProtobufAbstractClient {
+
+    private DynamicActionRegistry actionRegistry;
+    /**
+     * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by
+     * {@link #executeLocally(ProtobufActionType, ProtobufActionRequest, TaskListener)}.
+     */
+    private Supplier localNodeId;
+    private RemoteClusterService remoteClusterService;
+    private NamedWriteableRegistry namedWriteableRegistry;
+
+    public ProtobufNodeClient(Settings settings, ProtobufThreadPool threadPool) {
+        super(settings, threadPool);
+    }
+
+    public void initialize(
+        DynamicActionRegistry actionRegistry,
+        Supplier localNodeId,
+        RemoteClusterService remoteClusterService,
+        NamedWriteableRegistry namedWriteableRegistry
+    ) {
+        this.actionRegistry = actionRegistry;
+        this.localNodeId = localNodeId;
+        this.remoteClusterService = remoteClusterService;
+        this.namedWriteableRegistry = namedWriteableRegistry;
+    }
+
+    @Override
+    public void close() {
+        // nothing really to do
+    }
+
+    @Override
+    public void doExecute(
+        ProtobufActionType action,
+        Request request,
+        ActionListener listener
+    ) {
+        // Discard the task because the Client interface doesn't use it.
+        executeLocally(action, request, listener);
+    }
+
+    /**
+     * Execute a {@link ProtobufActionType} locally, returning the {@link Task} used to track it, and linking an {@link ActionListener}.
+     * Prefer this method if you don't need access to the task when listening for the response. This is the method used to implement
+     * the {@link org.opensearch.client.ProtobufClient} interface.
+     */
+    public Task executeLocally(
+        ProtobufActionType action,
+        Request request,
+        ActionListener listener
+    ) {
+        return transportAction(action).execute(request, listener);
+    }
+
+    /**
+     * Execute a {@link ProtobufActionType} locally, returning the {@link Task} used to track it, and linking a {@link TaskListener}. Prefer this
+     * method if you need access to the task when listening for the response.
+     */
+    public Task executeLocally(
+        ProtobufActionType action,
+        Request request,
+        TaskListener listener
+    ) {
+        return transportAction(action).execute(request, listener);
+    }
+
+    /**
+     * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by
+     * {@link #executeLocally(ProtobufActionType, ProtobufActionRequest, TaskListener)}.
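+     * A hedged usage sketch of that pairing (the surrounding wiring is illustrative, not part of this class):
+     * <pre>
+     * Task task = client.executeLocally(action, request, listener);
+     * TaskId taskId = new TaskId(client.getLocalNodeId(), task.getId());
+     * </pre>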
+     */
+    public String getLocalNodeId() {
+        return localNodeId.get();
+    }
+
+    /**
+     * Get the {@link TransportAction} for a {@link ProtobufActionType}, throwing exceptions if the action isn't available.
+     */
+    @SuppressWarnings("unchecked")
+    private TransportAction transportAction(
+        ProtobufActionType action
+    ) {
+        if (actionRegistry == null) {
+            throw new IllegalStateException("ProtobufNodeClient has not been initialized");
+        }
+        TransportAction transportAction = (TransportAction) actionRegistry.get(action);
+        if (transportAction == null) {
+            throw new IllegalStateException("failed to find action [" + action + "] to execute");
+        }
+        return transportAction;
+    }
+
+    @Override
+    public Client getRemoteClusterClient(String clusterAlias) {
+        return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias);
+    }
+
+    public NamedWriteableRegistry getNamedWriteableRegistry() {
+        return namedWriteableRegistry;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
index 40489e29ed9b5..e43f96eeddc7c 100644
--- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java
+++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
@@ -67,6 +67,8 @@
 import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoRequestBuilder;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest;
+import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse;
 import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction;
 import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder;
 import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction;
@@ -164,6 +166,8 @@
 import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
 import org.opensearch.action.admin.cluster.state.ClusterStateRequestBuilder;
 import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
+import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest;
+import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse;
 import org.opensearch.action.admin.cluster.stats.ClusterStatsAction;
 import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest;
 import org.opensearch.action.admin.cluster.stats.ClusterStatsRequestBuilder;
@@ -876,6 +880,16 @@ public NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds) {
         return new NodesInfoRequestBuilder(this, NodesInfoAction.INSTANCE).setNodesIds(nodesIds);
     }
 
+    @Override
+    public ActionFuture nodesInfo(final ProtobufNodesInfoRequest request) {
+        return execute(NodesInfoAction.INSTANCE, request);
+    }
+
+    @Override
+    public void nodesInfo(final ProtobufNodesInfoRequest request, final ActionListener listener) {
+        execute(NodesInfoAction.INSTANCE, request, listener);
+    }
+
     @Override
     public ActionFuture nodesStats(final NodesStatsRequest request) {
         return execute(NodesStatsAction.INSTANCE, request);
diff --git a/server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java b/server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java
new file mode 100644
index 0000000000000..d069c0edb0077
--- /dev/null
+++ b/server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java
@@ -0,0 +1,581 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.client.support;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.ActionFuture;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionResponse;
+import org.opensearch.action.ActionType;
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.action.ProtobufActionType;
+import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
+import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;
+import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder;
+import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction;
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest;
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder;
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse;
+import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction;
+import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest;
+import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder;
+import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse;
+import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction;
+import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest;
+import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequestBuilder;
+import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse;
+import org.opensearch.action.admin.cluster.health.ClusterHealthAction;
+import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction;
+import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
+import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder;
+import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
+import
org.opensearch.action.admin.cluster.node.info.NodesInfoAction; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse; +import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; +import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskAction; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskResponse; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.opensearch.action.admin.cluster.node.usage.NodesUsageAction; +import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequest; +import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; +import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; +import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; +import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryAction; +import 
org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction; +import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequestBuilder; +import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteAction; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsAction; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotAction; +import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import 
org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusAction; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateAction; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateAction; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; +import org.opensearch.action.admin.cluster.stats.ClusterStatsAction; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; +import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequestBuilder; +import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptAction; +import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequestBuilder; +import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptAction; +import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; +import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksAction; +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest; +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.opensearch.action.admin.indices.alias.IndicesAliasesAction; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.opensearch.action.admin.indices.alias.get.GetAliasesAction; +import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; +import 
org.opensearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; +import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.opensearch.action.admin.indices.analyze.AnalyzeAction; +import org.opensearch.action.admin.indices.analyze.AnalyzeRequestBuilder; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheAction; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; +import org.opensearch.action.admin.indices.close.CloseIndexAction; +import org.opensearch.action.admin.indices.close.CloseIndexRequest; +import org.opensearch.action.admin.indices.close.CloseIndexRequestBuilder; +import org.opensearch.action.admin.indices.close.CloseIndexResponse; +import org.opensearch.action.admin.indices.create.CreateIndexAction; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexAction; +import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; +import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexAction; +import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; +import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; +import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; +import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; +import org.opensearch.action.admin.indices.datastream.CreateDataStreamAction; +import org.opensearch.action.admin.indices.datastream.DeleteDataStreamAction; +import org.opensearch.action.admin.indices.datastream.GetDataStreamAction; +import org.opensearch.action.admin.indices.delete.DeleteIndexAction; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsAction; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse; +import org.opensearch.action.admin.indices.flush.FlushAction; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.flush.FlushRequestBuilder; +import org.opensearch.action.admin.indices.flush.FlushResponse; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeAction; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.opensearch.action.admin.indices.get.GetIndexAction; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexRequestBuilder; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsAction; +import 
org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; +import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder; +import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; +import org.opensearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder; +import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.opensearch.action.admin.indices.mapping.put.PutMappingAction; +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.opensearch.action.admin.indices.open.OpenIndexAction; +import org.opensearch.action.admin.indices.open.OpenIndexRequest; +import org.opensearch.action.admin.indices.open.OpenIndexRequestBuilder; +import org.opensearch.action.admin.indices.open.OpenIndexResponse; +import org.opensearch.action.admin.indices.readonly.AddIndexBlockAction; +import org.opensearch.action.admin.indices.readonly.AddIndexBlockRequest; +import org.opensearch.action.admin.indices.readonly.AddIndexBlockRequestBuilder; +import org.opensearch.action.admin.indices.readonly.AddIndexBlockResponse; +import org.opensearch.action.admin.indices.recovery.RecoveryAction; +import org.opensearch.action.admin.indices.recovery.RecoveryRequest; +import org.opensearch.action.admin.indices.recovery.RecoveryRequestBuilder; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.action.admin.indices.refresh.RefreshAction; +import org.opensearch.action.admin.indices.refresh.RefreshRequest; +import org.opensearch.action.admin.indices.refresh.RefreshRequestBuilder; +import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.admin.indices.resolve.ResolveIndexAction; +import org.opensearch.action.admin.indices.rollover.RolloverAction; +import org.opensearch.action.admin.indices.rollover.RolloverRequest; +import org.opensearch.action.admin.indices.rollover.RolloverRequestBuilder; +import org.opensearch.action.admin.indices.rollover.RolloverResponse; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsAction; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequest; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequestBuilder; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; +import org.opensearch.action.admin.indices.settings.get.GetSettingsAction; +import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.opensearch.action.admin.indices.settings.get.GetSettingsRequestBuilder; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsAction; +import 
org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; +import org.opensearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; +import org.opensearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.opensearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.opensearch.action.admin.indices.shrink.ResizeAction; +import org.opensearch.action.admin.indices.shrink.ResizeRequest; +import org.opensearch.action.admin.indices.shrink.ResizeRequestBuilder; +import org.opensearch.action.admin.indices.shrink.ResizeResponse; +import org.opensearch.action.admin.indices.stats.IndicesStatsAction; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; +import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; +import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesAction; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesRequest; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesRequestBuilder; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; +import org.opensearch.action.admin.indices.upgrade.get.UpgradeStatusAction; +import org.opensearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; +import org.opensearch.action.admin.indices.upgrade.get.UpgradeStatusRequestBuilder; +import org.opensearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; +import org.opensearch.action.admin.indices.upgrade.post.UpgradeAction; +import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest; +import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequestBuilder; +import org.opensearch.action.admin.indices.upgrade.post.UpgradeResponse; +import org.opensearch.action.admin.indices.validate.query.ValidateQueryAction; +import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; +import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.opensearch.action.bulk.BulkAction; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkRequestBuilder; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.delete.DeleteAction; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.delete.DeleteRequestBuilder; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.explain.ExplainAction; +import org.opensearch.action.explain.ExplainRequest; +import org.opensearch.action.explain.ExplainRequestBuilder; +import org.opensearch.action.explain.ExplainResponse; +import org.opensearch.action.fieldcaps.FieldCapabilitiesAction; 
+import org.opensearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.opensearch.action.fieldcaps.FieldCapabilitiesRequestBuilder; +import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.opensearch.action.get.GetAction; +import org.opensearch.action.get.GetRequest; +import org.opensearch.action.get.GetRequestBuilder; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.get.MultiGetAction; +import org.opensearch.action.get.MultiGetRequest; +import org.opensearch.action.get.MultiGetRequestBuilder; +import org.opensearch.action.get.MultiGetResponse; +import org.opensearch.action.index.IndexAction; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.ingest.DeletePipelineAction; +import org.opensearch.action.ingest.DeletePipelineRequest; +import org.opensearch.action.ingest.DeletePipelineRequestBuilder; +import org.opensearch.action.ingest.GetPipelineAction; +import org.opensearch.action.ingest.GetPipelineRequest; +import org.opensearch.action.ingest.GetPipelineRequestBuilder; +import org.opensearch.action.ingest.GetPipelineResponse; +import org.opensearch.action.ingest.PutPipelineAction; +import org.opensearch.action.ingest.PutPipelineRequest; +import org.opensearch.action.ingest.PutPipelineRequestBuilder; +import org.opensearch.action.ingest.SimulatePipelineAction; +import org.opensearch.action.ingest.SimulatePipelineRequest; +import org.opensearch.action.ingest.SimulatePipelineRequestBuilder; +import org.opensearch.action.ingest.SimulatePipelineResponse; +import org.opensearch.action.search.ClearScrollAction; +import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.ClearScrollRequestBuilder; +import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.DeleteSearchPipelineAction; +import org.opensearch.action.search.DeleteSearchPipelineRequest; +import org.opensearch.action.search.GetAllPitNodesRequest; +import org.opensearch.action.search.GetAllPitNodesResponse; +import org.opensearch.action.search.GetSearchPipelineAction; +import org.opensearch.action.search.GetSearchPipelineRequest; +import org.opensearch.action.search.GetSearchPipelineResponse; +import org.opensearch.action.search.MultiSearchAction; +import org.opensearch.action.search.MultiSearchRequest; +import org.opensearch.action.search.MultiSearchRequestBuilder; +import org.opensearch.action.search.MultiSearchResponse; +import org.opensearch.action.search.GetAllPitsAction; +import org.opensearch.action.search.PutSearchPipelineAction; +import org.opensearch.action.search.PutSearchPipelineRequest; +import org.opensearch.action.search.SearchAction; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchScrollAction; +import org.opensearch.action.search.SearchScrollRequest; +import org.opensearch.action.search.SearchScrollRequestBuilder; +import org.opensearch.action.support.PlainActionFuture; +import 
org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.termvectors.MultiTermVectorsAction; +import org.opensearch.action.termvectors.MultiTermVectorsRequest; +import org.opensearch.action.termvectors.MultiTermVectorsRequestBuilder; +import org.opensearch.action.termvectors.MultiTermVectorsResponse; +import org.opensearch.action.termvectors.TermVectorsAction; +import org.opensearch.action.termvectors.TermVectorsRequest; +import org.opensearch.action.termvectors.TermVectorsRequestBuilder; +import org.opensearch.action.termvectors.TermVectorsResponse; +import org.opensearch.action.update.UpdateAction; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.action.update.UpdateRequestBuilder; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.client.AdminClient; +import org.opensearch.client.Client; +import org.opensearch.client.ClusterAdminClient; +import org.opensearch.client.FilterClient; +import org.opensearch.client.IndicesAdminClient; +import org.opensearch.client.OpenSearchClient; +import org.opensearch.client.ProtobufAdminClient; +import org.opensearch.client.ProtobufClient; +import org.opensearch.client.ProtobufClusterAdminClient; +import org.opensearch.client.ProtobufOpenSearchClient; +import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; +import org.opensearch.common.Nullable; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.tasks.TaskId; +import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Map; + +/** + * Base client used to create concrete client implementations +* +* @opensearch.internal +*/ +public abstract class ProtobufAbstractClient implements ProtobufClient { + + protected final Logger logger; + + protected final Settings settings; + private final ProtobufThreadPool threadPool; + private final Admin admin; + + public ProtobufAbstractClient(Settings settings, ProtobufThreadPool threadPool) { + this.settings = settings; + this.threadPool = threadPool; + this.admin = new Admin(this); + this.logger = LogManager.getLogger(this.getClass()); + } + + @Override + public final Settings settings() { + return this.settings; + } + + @Override + public final ProtobufThreadPool threadPool() { + return this.threadPool; + } + + @Override + public final ProtobufAdminClient admin() { + return admin; + } + + @Override + public final ActionFuture execute( + ProtobufActionType action, + Request request + ) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + execute(action, request, actionFuture); + return actionFuture; + } + + /** + * This is the single execution point of *all* clients. 
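+     * Subclasses supply the actual dispatch through {@link #doExecute}, so cross-cutting behavior
+     * (headers, logging) can wrap every call here. A hedged usage sketch (the request construction
+     * is illustrative only):
+     * <pre>
+     * ProtobufClusterStateResponse response =
+     *     client.execute(ProtobufClusterStateAction.INSTANCE, new ProtobufClusterStateRequest()).actionGet();
+     * </pre>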
+     */
+    @Override
+    public final void execute(
+        ProtobufActionType action,
+        Request request,
+        ActionListener listener
+    ) {
+        doExecute(action, request, listener);
+    }
+
+    protected abstract void doExecute(
+        ProtobufActionType action,
+        Request request,
+        ActionListener listener
+    );
+
+    static class Admin implements ProtobufAdminClient {
+
+        private final ClusterAdmin clusterAdmin;
+
+        Admin(ProtobufOpenSearchClient client) {
+            this.clusterAdmin = new ClusterAdmin(client);
+        }
+
+        @Override
+        public ProtobufClusterAdminClient cluster() {
+            return clusterAdmin;
+        }
+    }
+
+    static class ClusterAdmin implements ProtobufClusterAdminClient {
+
+        private final ProtobufOpenSearchClient client;
+
+        ClusterAdmin(ProtobufOpenSearchClient client) {
+            this.client = client;
+        }
+
+        @Override
+        public ActionFuture execute(
+            ProtobufActionType action,
+            Request request
+        ) {
+            return client.execute(action, request);
+        }
+
+        @Override
+        public void execute(
+            ProtobufActionType action,
+            Request request,
+            ActionListener listener
+        ) {
+            client.execute(action, request, listener);
+        }
+
+        @Override
+        public ActionFuture state(final ProtobufClusterStateRequest request) {
+            return execute(ProtobufClusterStateAction.INSTANCE, request);
+        }
+
+        @Override
+        public void state(final ProtobufClusterStateRequest request, final ActionListener listener) {
+            execute(ProtobufClusterStateAction.INSTANCE, request, listener);
+        }
+
+        @Override
+        public ActionFuture nodesInfo(final ProtobufNodesInfoRequest request) {
+            return execute(NodesInfoAction.INSTANCE, request);
+        }
+
+        @Override
+        public void nodesInfo(final ProtobufNodesInfoRequest request, final ActionListener listener) {
+            execute(NodesInfoAction.INSTANCE, request, listener);
+        }
+
+        @Override
+        public NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds) {
+            return new NodesInfoRequestBuilder(this, NodesInfoAction.INSTANCE).setNodesIds(nodesIds);
+        }
+
+        // @Override
+        // public ActionFuture nodesStats(final NodesStatsRequest request) {
+        //     return execute(NodesStatsAction.INSTANCE, request);
+        // }
+
+        // @Override
+        // public void nodesStats(final NodesStatsRequest request, final ActionListener listener) {
+        //     execute(NodesStatsAction.INSTANCE, request, listener);
+        // }
+
+        // @Override
+        // public NodesStatsRequestBuilder prepareNodesStats(String...
nodesIds) { + // return new NodesStatsRequestBuilder(this, NodesStatsAction.INSTANCE).setNodesIds(nodesIds); + // } + } + + @Override + public Client filterWithHeader(Map headers) { + return new FilterClient(this) { + @Override + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + ThreadContext threadContext = threadPool().getThreadContext(); + try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) { + super.doExecute(action, request, listener); + } + } + }; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java new file mode 100644 index 0000000000000..59f6f7bfeb29e --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java @@ -0,0 +1,307 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.cluster; + +import org.opensearch.cluster.metadata.IndexGraveyard; +import org.opensearch.cluster.metadata.IndexGraveyard.IndexGraveyardDiff; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.ProtobufDiscoveryNodes; +import org.opensearch.gateway.GatewayService; +import org.opensearch.index.Index; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * An event received by the local node, signaling that the cluster state has changed. +* +* @opensearch.internal +*/ +public class ProtobufClusterChangedEvent { + + private final String source; + + private final ProtobufClusterState previousState; + + private final ProtobufClusterState state; + + private final ProtobufDiscoveryNodes.Delta nodesDelta; + + public ProtobufClusterChangedEvent(String source, ProtobufClusterState state, ProtobufClusterState previousState) { + Objects.requireNonNull(source, "source must not be null"); + Objects.requireNonNull(state, "state must not be null"); + Objects.requireNonNull(previousState, "previousState must not be null"); + this.source = source; + this.state = state; + this.previousState = previousState; + this.nodesDelta = state.nodes().delta(previousState.nodes()); + } + + /** + * The source that caused this cluster event to be raised. + */ + public String source() { + return this.source; + } + + /** + * The new cluster state that caused this change event. + */ + public ProtobufClusterState state() { + return this.state; + } + + /** + * The previous cluster state for this change event. + */ + public ProtobufClusterState previousState() { + return this.previousState; + } + + /** + * Returns true iff the routing tables (for all indices) have + * changed between the previous cluster state and the current cluster state. + * Note that this is an object reference equality test, not an equals test. + */ + public boolean routingTableChanged() { + return state.routingTable() != previousState.routingTable(); + } + + /** + * Returns true iff the routing table has changed for the given index. + * Note that this is an object reference equality test, not an equals test. 
+     */
+    public boolean indexRoutingTableChanged(String index) {
+        Objects.requireNonNull(index, "index must not be null");
+        if (!state.routingTable().hasIndex(index) && !previousState.routingTable().hasIndex(index)) {
+            return false;
+        }
+        if (state.routingTable().hasIndex(index) && previousState.routingTable().hasIndex(index)) {
+            return state.routingTable().index(index) != previousState.routingTable().index(index);
+        }
+        return true;
+    }
+
+    /**
+     * Returns the indices created in this event
+     */
+    public List indicesCreated() {
+        if (!metadataChanged()) {
+            return Collections.emptyList();
+        }
+        List created = null;
+        for (final String index : state.metadata().indices().keySet()) {
+            if (!previousState.metadata().hasIndex(index)) {
+                if (created == null) {
+                    created = new ArrayList<>();
+                }
+                created.add(index);
+            }
+        }
+        return created == null ? Collections.emptyList() : created;
+    }
+
+    /**
+     * Returns the indices deleted in this event
+     */
+    public List indicesDeleted() {
+        if (previousState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
+            // working off of a non-initialized previous state, so use the tombstones for index deletions
+            return indicesDeletedFromTombstones();
+        } else {
+            // examine the diffs in index metadata between the previous and new cluster states to get the deleted indices
+            return indicesDeletedFromClusterState();
+        }
+    }
+
+    /**
+     * Returns true iff the metadata for the cluster has changed between
+     * the previous cluster state and the new cluster state. Note that this is an object
+     * reference equality test, not an equals test.
+     */
+    public boolean metadataChanged() {
+        return state.metadata() != previousState.metadata();
+    }
+
+    /**
+     * Returns the set of custom metadata types for which the cluster's custom metadata has changed
+     * between the previous cluster state and the new cluster state. Custom metadata types are
+     * returned iff they have been added, updated, or removed between the previous and the current state.
+     */
+    public Set changedCustomMetadataSet() {
+        Set result = new HashSet<>();
+        Map currentCustoms = state.metadata().customs();
+        Map previousCustoms = previousState.metadata().customs();
+        if (currentCustoms.equals(previousCustoms) == false) {
+            for (Map.Entry currentCustomMetadata : currentCustoms.entrySet()) {
+                // new custom md added or existing custom md changed
+                if (previousCustoms.containsKey(currentCustomMetadata.getKey()) == false
+                    || currentCustomMetadata.getValue().equals(previousCustoms.get(currentCustomMetadata.getKey())) == false) {
+                    result.add(currentCustomMetadata.getKey());
+                }
+            }
+            // existing custom md deleted
+            for (Map.Entry previousCustomMetadata : previousCustoms.entrySet()) {
+                if (currentCustoms.containsKey(previousCustomMetadata.getKey()) == false) {
+                    result.add(previousCustomMetadata.getKey());
+                }
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Returns true iff the {@link IndexMetadata} for a given index
+     * has changed between the previous cluster state and the new cluster state.
+     * Note that this is an object reference equality test, not an equals test.
+     */
+    public static boolean indexMetadataChanged(IndexMetadata metadata1, IndexMetadata metadata2) {
+        assert metadata1 != null && metadata2 != null;
+        // no need to check on version, since disco modules will make sure to use the
+        // same instance if it's a version match
+        return metadata1 != metadata2;
+    }
+
+    /**
+     * Returns true iff the cluster level blocks have changed between cluster states.
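+     * Cluster-level blocks gate operations such as writes while the state is not yet recovered;
+     * a hedged consumer sketch (pauseIngest() is hypothetical):
+     * <pre>
+     * if (event.blocksChanged()
+     *     && event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
+     *     pauseIngest();
+     * }
+     * </pre>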
+ * Note that this is an object reference equality test, not an equals test. + */ + public boolean blocksChanged() { + return state.blocks() != previousState.blocks(); + } + + /** + * Returns true iff the local node is the mater node of the cluster. + */ + public boolean localNodeClusterManager() { + return state.nodes().isLocalNodeElectedClusterManager(); + } + + /** + * Returns true iff the local node is the mater node of the cluster. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #localNodeClusterManager()} + */ + @Deprecated + public boolean localNodeMaster() { + return localNodeClusterManager(); + } + + /** + * Returns the {@link org.opensearch.cluster.node.ProtobufDiscoveryNodes.Delta} between + * the previous cluster state and the new cluster state. + */ + public ProtobufDiscoveryNodes.Delta nodesDelta() { + return this.nodesDelta; + } + + /** + * Returns true iff nodes have been removed from the cluster since the last cluster state. + */ + public boolean nodesRemoved() { + return nodesDelta.removed(); + } + + /** + * Returns true iff nodes have been added from the cluster since the last cluster state. + */ + public boolean nodesAdded() { + return nodesDelta.added(); + } + + /** + * Returns true iff nodes have been changed (added or removed) from the cluster since the last cluster state. + */ + public boolean nodesChanged() { + return nodesRemoved() || nodesAdded(); + } + + /** + * Determines whether or not the current cluster state represents an entirely + * new cluster, either when a node joins a cluster for the first time or when + * the node receives a cluster state update from a brand new cluster (different + * UUID from the previous cluster), which will happen when a cluster-manager node is + * elected that has never been part of the cluster before. + */ + public boolean isNewCluster() { + final String prevClusterUUID = previousState.metadata().clusterUUID(); + final String currClusterUUID = state.metadata().clusterUUID(); + return prevClusterUUID.equals(currClusterUUID) == false; + } + + // Get the deleted indices by comparing the index metadatas in the previous and new cluster states. + // If an index exists in the previous cluster state, but not in the new cluster state, it must have been deleted. + private List indicesDeletedFromClusterState() { + // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected cluster-manager + // that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; + // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous + // cluster UUID, in which case, we don't want to delete indices that the cluster-manager erroneously believes shouldn't exist. 
+ // See test DiscoveryWithServiceDisruptionsIT.testIndicesDeleted() + // See discussion on https://github.com/elastic/elasticsearch/pull/9952 and + // https://github.com/elastic/elasticsearch/issues/11665 + if (metadataChanged() == false || isNewCluster()) { + return Collections.emptyList(); + } + Set deleted = null; + final Metadata previousMetadata = previousState.metadata(); + final Metadata currentMetadata = state.metadata(); + + for (final IndexMetadata index : previousMetadata.indices().values()) { + IndexMetadata current = currentMetadata.index(index.getIndex()); + if (current == null) { + if (deleted == null) { + deleted = new HashSet<>(); + } + deleted.add(index.getIndex()); + } + } + + final IndexGraveyard currentGraveyard = currentMetadata.indexGraveyard(); + final IndexGraveyard previousGraveyard = previousMetadata.indexGraveyard(); + + // Look for new entries in the index graveyard, where there's no corresponding index in the + // previous metadata. This indicates that a dangling index has been explicitly deleted, so + // each node should make sure to delete any related data. + if (currentGraveyard != previousGraveyard) { + final IndexGraveyardDiff indexGraveyardDiff = (IndexGraveyardDiff) currentGraveyard.diff(previousGraveyard); + + final List added = indexGraveyardDiff.getAdded(); + + if (added.isEmpty() == false) { + if (deleted == null) { + deleted = new HashSet<>(); + } + for (IndexGraveyard.Tombstone tombstone : added) { + deleted.add(tombstone.getIndex()); + } + } + } + + return deleted == null ? Collections.emptyList() : new ArrayList<>(deleted); + } + + private List indicesDeletedFromTombstones() { + // We look at the full tombstones list to see which indices need to be deleted. In the case of + // a valid previous cluster state, indicesDeletedFromClusterState() will be used to get the deleted + // list, so a diff doesn't make sense here. When a node (re)joins the cluster, its possible for it + // to re-process the same deletes or process deletes about indices it never knew about. This is not + // an issue because there are safeguards in place in the delete store operation in case the index + // folder doesn't exist on the file system. + List tombstones = state.metadata().indexGraveyard().getTombstones(); + return tombstones.stream().map(IndexGraveyard.Tombstone::getIndex).collect(Collectors.toList()); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java new file mode 100644 index 0000000000000..a41eed231ef18 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java @@ -0,0 +1,61 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
+*/
+
+package org.opensearch.cluster;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+
+import java.io.IOException;
+
+/**
+ * Cluster Name
+*
+* @opensearch.internal
+*/
+public class ProtobufClusterName implements ProtobufWriteable {
+
+    public static final Setting<ProtobufClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> {
+        if (s.isEmpty()) {
+            throw new IllegalArgumentException("[cluster.name] must not be empty");
+        }
+        if (s.contains(":")) {
+            throw new IllegalArgumentException("[cluster.name] must not contain ':'");
+        }
+        return new ProtobufClusterName(s);
+    }, Setting.Property.NodeScope);
+
+    public static final ProtobufClusterName DEFAULT = CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
+
+    private final String value;
+
+    public ProtobufClusterName(CodedInputStream input) throws IOException {
+        this(input.readString());
+    }
+
+    public ProtobufClusterName(String value) {
+        this.value = value.intern();
+    }
+
+    public String value() {
+        return this.value;
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        out.writeStringNoTag(value);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
new file mode 100644
index 0000000000000..85adfd417936d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
@@ -0,0 +1,803 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/ + +package org.opensearch.cluster; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.opensearch.cluster.block.ClusterBlock; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; +import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.ImmutableOpenMap; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufVersionedNamedWriteable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.discovery.Discovery; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.StreamSupport; + +import static org.opensearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM; + +/** + * Represents the current state of the cluster. +*
+* The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is
+* built on demand from the {@link RoutingTable}.
+* The cluster state can be updated only on the cluster-manager node. All updates are performed on a
+* single thread and controlled by the {@link ClusterService}. After every update the
+* {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the
+* cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on
+* the type of discovery.
+*
+* The cluster state implements the {@link ProtobufDiffable} interface in order to support publishing of cluster state
+* differences instead of the entire state on each change. The publishing mechanism should only send differences
+* to a node if this node was present in the previous version of the cluster state. If a node was
+* not present in the previous version of the cluster state, this node is unlikely to have the previous cluster
+* state version and should be sent a complete version. In order to make sure that the differences are applied to the
+* correct version of the cluster state, each cluster state version update generates {@link #stateUUID} that uniquely
+* identifies this version of the state. This uuid is verified by the {@link ClusterStateDiff#apply} method to
+* make sure that the correct diffs are applied. If uuids don't match, the {@link ClusterStateDiff#apply} method
+* throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send
+* a full version of the cluster state to the node on which this exception was thrown.
+*
+* @opensearch.internal
+*/
+public class ProtobufClusterState implements ToXContentFragment, ProtobufDiffable<ProtobufClusterState> {
+
+    public static final ProtobufClusterState EMPTY_STATE = builder(ProtobufClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+        .build();
+
+    /**
+     * An interface that implementors use when a class requires a client to maybe have a feature.
+     *
+     * @opensearch.internal
+     */
+    public interface FeatureAware {
+
+        /**
+         * An optional feature that is required for the client to have.
+         *
+         * @return an empty optional if no feature is required otherwise a string representing the required feature
+         */
+        default Optional<String> getRequiredFeature() {
+            return Optional.empty();
+        }
+
+        /**
+         * Tests whether the custom should be serialized. The criterion is that
+         * the output stream must be at least the minimum supported version of the custom.
+         *
+         * That is, we only serialize customs to clients that can understand the custom based on the version of the client.
+         *
+         * @param out    the output stream
+         * @param custom the custom to serialize
+         * @param <T>    the type of the custom
+         * @return true if the custom should be serialized and false otherwise
+         */
+        static <T extends ProtobufVersionedNamedWriteable & FeatureAware> boolean shouldSerialize(
+            final CodedOutputStream out,
+            final T custom
+        ) {
+            ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput();
+            return protobufStreamOutput.getVersion().onOrAfter(custom.getMinimalSupportedVersion());
+        }
+
+    }
+
+    /**
+     * Custom cluster state.
+     *
+     * @opensearch.internal
+     */
+    public interface Custom extends ProtobufNamedDiffable<Custom>, ToXContentFragment, FeatureAware {
+
+        /**
+         * Returns true iff this {@link Custom} is private to the cluster and should never be sent to a client.
+         * The default is false.
+         */
+        default boolean isPrivate() {
+            return false;
+        }
+
+    }
+
+    private static final ProtobufNamedDiffableValueSerializer<Custom> CUSTOM_VALUE_SERIALIZER = new ProtobufNamedDiffableValueSerializer<>(
+        Custom.class
+    );
+
+    public static final String UNKNOWN_UUID = "_na_";
+
+    public static final long UNKNOWN_VERSION = -1;
+
+    private final long version;
+
+    private final String stateUUID;
+
+    private final RoutingTable routingTable;
+
+    private final ProtobufDiscoveryNodes nodes;
+
+    private final Metadata metadata;
+
+    private final ClusterBlocks blocks;
+
+    private final ImmutableOpenMap<String, Custom> customs;
+
+    private final ProtobufClusterName clusterName;
+
+    private final boolean wasReadFromDiff;
+
+    private final int minimumClusterManagerNodesOnPublishingClusterManager;
+
+    // built on demand
+    private volatile RoutingNodes routingNodes;
+
+    public ProtobufClusterState(long version, String stateUUID, ProtobufClusterState state) {
+        this(
+            state.clusterName,
+            version,
+            stateUUID,
+            state.metadata(),
+            state.routingTable(),
+            state.nodes(),
+            state.blocks(),
+            state.customs(),
+            -1,
+            false
+        );
+    }
+
+    public ProtobufClusterState(
+        ProtobufClusterName clusterName,
+        long version,
+        String stateUUID,
+        Metadata metadata,
+        RoutingTable routingTable,
+        ProtobufDiscoveryNodes nodes,
+        ClusterBlocks blocks,
+        ImmutableOpenMap<String, Custom> customs,
+        int minimumClusterManagerNodesOnPublishingClusterManager,
+        boolean wasReadFromDiff
+    ) {
+        this.version = version;
+        this.stateUUID = stateUUID;
+        this.clusterName = clusterName;
+        this.metadata = metadata;
+        this.routingTable = routingTable;
+        this.nodes = nodes;
+        this.blocks = blocks;
+        this.customs = customs;
+        this.minimumClusterManagerNodesOnPublishingClusterManager = minimumClusterManagerNodesOnPublishingClusterManager;
+        this.wasReadFromDiff = wasReadFromDiff;
+    }
+
+    public long term() {
+        return coordinationMetadata().term();
+    }
+
+    public long version() {
+        return this.version;
+    }
+
+    public long getVersion() {
+        return version();
+    }
+
+    public long getVersionOrMetadataVersion() {
+        // When following a Zen1 cluster-manager, the cluster state version is not guaranteed to increase,
+        // so instead it is preferable to use the metadata version to determine the freshest node.
+        // However when following a Zen2 cluster-manager the cluster state version should be used.
+        return term() == ZEN1_BWC_TERM ? metadata().version() : version();
+    }
+
+    /**
+     * This stateUUID is automatically generated for each version of cluster state. It is used to make sure that
+     * we are applying diffs to the right previous state.
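+     * Two states that share the same version but carry different state UUIDs are therefore never treated
+     * as diff-compatible.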
+ */ + public String stateUUID() { + return this.stateUUID; + } + + public ProtobufDiscoveryNodes nodes() { + return this.nodes; + } + + public ProtobufDiscoveryNodes getNodes() { + return nodes(); + } + + public Metadata metadata() { + return this.metadata; + } + + public Metadata getMetadata() { + return metadata(); + } + + public CoordinationMetadata coordinationMetadata() { + return metadata.coordinationMetadata(); + } + + public RoutingTable routingTable() { + return routingTable; + } + + public RoutingTable getRoutingTable() { + return routingTable(); + } + + public ClusterBlocks blocks() { + return this.blocks; + } + + public ClusterBlocks getBlocks() { + return blocks; + } + + public ImmutableOpenMap customs() { + return this.customs; + } + + public ImmutableOpenMap getCustoms() { + return this.customs; + } + + public T custom(String type) { + return (T) customs.get(type); + } + + @SuppressWarnings("unchecked") + public T custom(String type, T defaultValue) { + return (T) customs.getOrDefault(type, defaultValue); + } + + public ProtobufClusterName getClusterName() { + return this.clusterName; + } + + public VotingConfiguration getLastAcceptedConfiguration() { + return coordinationMetadata().getLastAcceptedConfiguration(); + } + + public VotingConfiguration getLastCommittedConfiguration() { + return coordinationMetadata().getLastCommittedConfiguration(); + } + + public Set getVotingConfigExclusions() { + return coordinationMetadata().getVotingConfigExclusions(); + } + + /** + * Returns a built (on demand) routing nodes view of the routing table. + */ + public RoutingNodes getRoutingNodes() { + if (routingNodes != null) { + return routingNodes; + } + routingNodes = new RoutingNodes(this); + return routingNodes; + } + + /** + * Metrics for cluster state. 
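+     * Each constant maps to the token accepted by the {@code metric} request parameter; for example, the
+     * (illustrative) call {@code Metric.parseString("version,nodes", true)} yields {@link #VERSION} and {@link #NODES}.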
+ * + * @opensearch.internal + */ + public enum Metric { + VERSION("version"), + + /** + * @deprecated As of 2.0, because promoting inclusive language, replaced by {@link #CLUSTER_MANAGER_NODE} + */ + @Deprecated + MASTER_NODE("master_node"), + CLUSTER_MANAGER_NODE("cluster_manager_node"), + BLOCKS("blocks"), + NODES("nodes"), + METADATA("metadata"), + ROUTING_TABLE("routing_table"), + ROUTING_NODES("routing_nodes"), + CUSTOMS("customs"); + + private static Map valueToEnum; + + static { + valueToEnum = new HashMap<>(); + for (Metric metric : Metric.values()) { + valueToEnum.put(metric.value, metric); + } + } + + private final String value; + + Metric(String value) { + this.value = value; + } + + public static EnumSet parseString(String param, boolean ignoreUnknown) { + String[] metrics = Strings.splitStringByCommaToArray(param); + EnumSet result = EnumSet.noneOf(Metric.class); + for (String metric : metrics) { + if ("_all".equals(metric)) { + result = EnumSet.allOf(Metric.class); + break; + } + Metric m = valueToEnum.get(metric); + if (m == null) { + if (!ignoreUnknown) { + throw new IllegalArgumentException("Unknown metric [" + metric + "]"); + } + } else { + result.add(m); + } + } + return result; + } + + @Override + public String toString() { + return value; + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + EnumSet metrics = Metric.parseString(params.param("metric", "_all"), true); + + // always provide the cluster_uuid as part of the top-level response (also part of the metadata response) + builder.field("cluster_uuid", metadata().clusterUUID()); + + if (metrics.contains(Metric.VERSION)) { + builder.field("version", version); + builder.field("state_uuid", stateUUID); + } + + if (metrics.contains(Metric.MASTER_NODE)) { + builder.field("master_node", nodes().getClusterManagerNodeId()); + } + + // Value of the field is identical with the above, and aims to replace the above field. 
+ if (metrics.contains(Metric.CLUSTER_MANAGER_NODE)) { + builder.field("cluster_manager_node", nodes().getClusterManagerNodeId()); + } + + if (metrics.contains(Metric.BLOCKS)) { + builder.startObject("blocks"); + + if (!blocks().global().isEmpty()) { + builder.startObject("global"); + for (ClusterBlock block : blocks().global()) { + block.toXContent(builder, params); + } + builder.endObject(); + } + + if (!blocks().indices().isEmpty()) { + builder.startObject("indices"); + for (ObjectObjectCursor> entry : blocks().indices()) { + builder.startObject(entry.key); + for (ClusterBlock block : entry.value) { + block.toXContent(builder, params); + } + builder.endObject(); + } + builder.endObject(); + } + + builder.endObject(); + } + + // nodes + if (metrics.contains(Metric.NODES)) { + builder.startObject("nodes"); + for (DiscoveryNode node : nodes) { + node.toXContent(builder, params); + } + builder.endObject(); + } + + // meta data + if (metrics.contains(Metric.METADATA)) { + metadata.toXContent(builder, params); + } + + // routing table + if (metrics.contains(Metric.ROUTING_TABLE)) { + builder.startObject("routing_table"); + builder.startObject("indices"); + for (IndexRoutingTable indexRoutingTable : routingTable()) { + builder.startObject(indexRoutingTable.getIndex().getName()); + builder.startObject("shards"); + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id())); + for (ShardRouting shardRouting : indexShardRoutingTable) { + shardRouting.toXContent(builder, params); + } + builder.endArray(); + } + builder.endObject(); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + } + + // routing nodes + if (metrics.contains(Metric.ROUTING_NODES)) { + builder.startObject("routing_nodes"); + builder.startArray("unassigned"); + for (ShardRouting shardRouting : getRoutingNodes().unassigned()) { + shardRouting.toXContent(builder, params); + } + builder.endArray(); + + builder.startObject("nodes"); + for (RoutingNode routingNode : getRoutingNodes()) { + builder.startArray(routingNode.nodeId() == null ? "null" : routingNode.nodeId()); + for (ShardRouting shardRouting : routingNode) { + shardRouting.toXContent(builder, params); + } + builder.endArray(); + } + builder.endObject(); + + builder.endObject(); + } + if (metrics.contains(Metric.CUSTOMS)) { + for (ObjectObjectCursor cursor : customs) { + builder.startObject(cursor.key); + cursor.value.toXContent(builder, params); + builder.endObject(); + } + } + + return builder; + } + + public static Builder builder(ProtobufClusterName clusterName) { + return new Builder(clusterName); + } + + public static Builder builder(ProtobufClusterState state) { + return new Builder(state); + } + + /** + * Builder for cluster state. 
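+     * <p>
+     * Typical usage, as an illustrative sketch ({@code currentState} stands for any existing state):
+     * <pre>
+     * ProtobufClusterState newState = ProtobufClusterState.builder(currentState)
+     *     .incrementVersion()
+     *     .build();
+     * </pre>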
+ * + * @opensearch.internal + */ + public static class Builder { + + private final ProtobufClusterName clusterName; + private long version = 0; + private String uuid = UNKNOWN_UUID; + private Metadata metadata = Metadata.EMPTY_METADATA; + private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE; + private ProtobufDiscoveryNodes nodes = ProtobufDiscoveryNodes.EMPTY_NODES; + private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; + private final ImmutableOpenMap.Builder customs; + private boolean fromDiff; + private int minimumClusterManagerNodesOnPublishingClusterManager = -1; + + public Builder(ProtobufClusterState state) { + this.clusterName = state.clusterName; + this.version = state.version(); + this.uuid = state.stateUUID(); + this.nodes = state.nodes(); + this.routingTable = state.routingTable(); + this.metadata = state.metadata(); + this.blocks = state.blocks(); + this.customs = ImmutableOpenMap.builder(state.customs()); + this.minimumClusterManagerNodesOnPublishingClusterManager = state.minimumClusterManagerNodesOnPublishingClusterManager; + this.fromDiff = false; + } + + public Builder(ProtobufClusterName clusterName) { + customs = ImmutableOpenMap.builder(); + this.clusterName = clusterName; + } + + public Builder nodes(ProtobufDiscoveryNodes.Builder nodesBuilder) { + return nodes(nodesBuilder.build()); + } + + public Builder nodes(ProtobufDiscoveryNodes nodes) { + this.nodes = nodes; + return this; + } + + public ProtobufDiscoveryNodes nodes() { + return nodes; + } + + public Builder routingTable(RoutingTable routingTable) { + this.routingTable = routingTable; + return this; + } + + public Builder metadata(Metadata.Builder metadataBuilder) { + return metadata(metadataBuilder.build()); + } + + public Builder metadata(Metadata metadata) { + this.metadata = metadata; + return this; + } + + public Builder blocks(ClusterBlocks.Builder blocksBuilder) { + return blocks(blocksBuilder.build()); + } + + public Builder blocks(ClusterBlocks blocks) { + this.blocks = blocks; + return this; + } + + public Builder version(long version) { + this.version = version; + return this; + } + + public Builder incrementVersion() { + this.version = version + 1; + this.uuid = UNKNOWN_UUID; + return this; + } + + public Builder stateUUID(String uuid) { + this.uuid = uuid; + return this; + } + + public Builder minimumClusterManagerNodesOnPublishingClusterManager(int minimumClusterManagerNodesOnPublishingClusterManager) { + this.minimumClusterManagerNodesOnPublishingClusterManager = minimumClusterManagerNodesOnPublishingClusterManager; + return this; + } + + public Builder putCustom(String type, Custom custom) { + customs.put(type, Objects.requireNonNull(custom, type)); + return this; + } + + public Builder removeCustom(String type) { + customs.remove(type); + return this; + } + + public Builder customs(ImmutableOpenMap customs) { + StreamSupport.stream(customs.spliterator(), false).forEach(cursor -> Objects.requireNonNull(cursor.value, cursor.key)); + this.customs.putAll(customs); + return this; + } + + public Builder fromDiff(boolean fromDiff) { + this.fromDiff = fromDiff; + return this; + } + + public ProtobufClusterState build() { + if (UNKNOWN_UUID.equals(uuid)) { + uuid = UUIDs.randomBase64UUID(); + } + return new ProtobufClusterState( + clusterName, + version, + uuid, + metadata, + routingTable, + nodes, + blocks, + customs.build(), + minimumClusterManagerNodesOnPublishingClusterManager, + fromDiff + ); + } + + // public static byte[] toBytes(ProtobufClusterState state) throws 
IOException { + // BytesStreamOutput os = new BytesStreamOutput(); + // state.writeTo(os); + // return BytesReference.toBytes(os.bytes()); + // } + + // /** + // * @param data input bytes + // * @param localNode used to set the local node in the cluster state. + // */ + // public static ProtobufClusterState fromBytes(byte[] data, DiscoveryNode localNode, NamedWriteableRegistry registry) throws + // IOException { + // StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), registry); + // return readFrom(in, localNode); + + // } + } + + @Override + public ProtobufDiff diff(ProtobufClusterState previousState) { + return new ClusterStateDiff(previousState, this); + } + + public static ProtobufDiff readDiffFrom(CodedInputStream in, DiscoveryNode localNode) throws IOException { + return new ClusterStateDiff(in, localNode); + } + + public static ProtobufClusterState readFrom(CodedInputStream in, DiscoveryNode localNode) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + ProtobufClusterName clusterName = new ProtobufClusterName(in); + Builder builder = new Builder(clusterName); + builder.version = in.readInt64(); + builder.uuid = in.readString(); + // builder.metadata = Metadata.readFrom(in); + // builder.routingTable = RoutingTable.readFrom(in); + // builder.nodes = ProtobufDiscoveryNodes.readFrom(in, localNode); + // builder.blocks = ClusterBlocks.readFrom(in); + int customSize = in.readInt32(); + for (int i = 0; i < customSize; i++) { + Custom customIndexMetadata = protobufStreamInput.readNamedWriteable(Custom.class); + builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); + } + builder.minimumClusterManagerNodesOnPublishingClusterManager = in.readInt32(); + return builder.build(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + clusterName.writeTo(out); + out.writeInt64NoTag(version); + out.writeStringNoTag(stateUUID); + // metadata.writeTo(out); + // routingTable.writeTo(out); + // nodes.writeTo(out); + // blocks.writeTo(out); + // filter out custom states not supported by the other node + int numberOfCustoms = 0; + for (final ObjectCursor cursor : customs.values()) { + if (FeatureAware.shouldSerialize(out, cursor.value)) { + numberOfCustoms++; + } + } + out.writeInt32NoTag(numberOfCustoms); + for (final ObjectCursor cursor : customs.values()) { + if (FeatureAware.shouldSerialize(out, cursor.value)) { + protobufStreamOutput.writeNamedWriteable(cursor.value, out); + } + } + out.writeInt32NoTag(minimumClusterManagerNodesOnPublishingClusterManager); + } + + /** + * The cluster state diff. 
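+     * <p>
+     * Conceptually, {@code after.diff(before).apply(before)} rebuilds {@code after}; {@code apply} checks the
+     * recorded state UUIDs and rejects any other base state. An illustrative sketch ({@code before} and
+     * {@code after} stand for two states of the same cluster):
+     * <pre>{@code
+     * ProtobufDiff<ProtobufClusterState> diff = after.diff(before);
+     * ProtobufClusterState rebuilt = diff.apply(before); // throws IncompatibleClusterStateVersionException on a wrong base
+     * }</pre>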
+ * + * @opensearch.internal + */ + private static class ClusterStateDiff implements ProtobufDiff { + + private final long toVersion; + + private final String fromUuid; + + private final String toUuid; + + private final ProtobufClusterName clusterName; + + private final ProtobufDiff routingTable; + + private final ProtobufDiff nodes; + + private final ProtobufDiff metadata; + + private final ProtobufDiff blocks; + + private final ProtobufDiff> customs; + + private final int minimumClusterManagerNodesOnPublishingClusterManager; + + ClusterStateDiff(ProtobufClusterState before, ProtobufClusterState after) { + fromUuid = before.stateUUID; + toUuid = after.stateUUID; + toVersion = after.version; + clusterName = after.clusterName; + routingTable = null; + nodes = null; + metadata = null; + blocks = null; + customs = ProtobufDiffableUtils.diff( + before.customs, + after.customs, + ProtobufDiffableUtils.getStringKeySerializer(), + CUSTOM_VALUE_SERIALIZER + ); + minimumClusterManagerNodesOnPublishingClusterManager = after.minimumClusterManagerNodesOnPublishingClusterManager; + } + + ClusterStateDiff(CodedInputStream in, DiscoveryNode localNode) throws IOException { + clusterName = new ProtobufClusterName(in); + fromUuid = in.readString(); + toUuid = in.readString(); + toVersion = in.readInt64(); + routingTable = null; + nodes = null; + metadata = null; + blocks = null; + customs = ProtobufDiffableUtils.readImmutableOpenMapDiff( + in, + ProtobufDiffableUtils.getStringKeySerializer(), + CUSTOM_VALUE_SERIALIZER + ); + minimumClusterManagerNodesOnPublishingClusterManager = in.readInt32(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + clusterName.writeTo(out); + out.writeStringNoTag(fromUuid); + out.writeStringNoTag(toUuid); + out.writeInt64NoTag(toVersion); + // routingTable.writeTo(out); + // nodes.writeTo(out); + // metadata.writeTo(out); + // blocks.writeTo(out); + // customs.writeTo(out); + // out.writeVInt(minimumClusterManagerNodesOnPublishingClusterManager); + } + + @Override + public ProtobufClusterState apply(ProtobufClusterState state) { + Builder builder = new Builder(clusterName); + if (toUuid.equals(state.stateUUID)) { + // no need to read the rest - cluster state didn't change + return state; + } + if (fromUuid.equals(state.stateUUID) == false) { + throw new IncompatibleClusterStateVersionException(state.version, state.stateUUID, toVersion, fromUuid); + } + builder.stateUUID(toUuid); + builder.version(toVersion); + builder.routingTable(routingTable.apply(state.routingTable)); + builder.nodes(nodes.apply(state.nodes)); + builder.metadata(metadata.apply(state.metadata)); + builder.blocks(blocks.apply(state.blocks)); + builder.customs(customs.apply(state.customs)); + builder.minimumClusterManagerNodesOnPublishingClusterManager(minimumClusterManagerNodesOnPublishingClusterManager); + builder.fromDiff(true); + return builder.build(); + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java new file mode 100644 index 0000000000000..3169688ba9b3e --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java @@ -0,0 +1,29 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.cluster; + +import org.opensearch.cluster.service.ClusterService; + +/** + * A component that is in charge of applying an incoming cluster state to the node internal data structures. +* The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}. +* +* @opensearch.internal +*/ +public interface ProtobufClusterStateApplier { + + /** + * Called when a new cluster state ({@link ProtobufClusterChangedEvent#state()} needs to be applied. The cluster state to be applied is already + * committed when this method is called, so an applier must therefore be prepared to deal with any state it receives without throwing + * an exception. Throwing an exception from an applier is very bad because it will stop the application of this state before it has + * reached all the other appliers, and will likely result in another attempt to apply the same (or very similar) cluster state which + * might continue until this node is removed from the cluster. + */ + void applyClusterState(ProtobufClusterChangedEvent event); +} diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufDiff.java b/server/src/main/java/org/opensearch/cluster/ProtobufDiff.java new file mode 100644 index 0000000000000..9d281ff10be04 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufDiff.java @@ -0,0 +1,29 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.cluster; + +import org.opensearch.common.io.stream.ProtobufWriteable; + +/** + * Represents difference between states of cluster state parts +* +* @opensearch.internal +*/ +public interface ProtobufDiff extends ProtobufWriteable { + + /** + * Applies difference to the specified part and returns the resulted part + */ + T apply(T part); +} diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufDiffable.java b/server/src/main/java/org/opensearch/cluster/ProtobufDiffable.java new file mode 100644 index 0000000000000..80adeb739fbec --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufDiffable.java @@ -0,0 +1,30 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. 
+*/ + +package org.opensearch.cluster; + +import org.opensearch.common.io.stream.ProtobufWriteable; + +/** + * Cluster state part, changes in which can be serialized +* +* @opensearch.internal +*/ +public interface ProtobufDiffable extends ProtobufWriteable { + + /** + * Returns serializable object representing differences between this and previousState + */ + ProtobufDiff diff(T previousState); + +} diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java b/server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java new file mode 100644 index 0000000000000..52f4635c6f2e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java @@ -0,0 +1,812 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.cluster; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import com.carrotsearch.hppc.cursors.IntCursor; +import com.carrotsearch.hppc.cursors.IntObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.opensearch.Version; +import org.opensearch.common.collect.ImmutableOpenIntMap; +import org.opensearch.common.collect.ImmutableOpenMap; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Utility class for a diffable +* +* @opensearch.internal +*/ +public final class ProtobufDiffableUtils { + private ProtobufDiffableUtils() {} + + /** + * Returns a map key serializer for String keys + */ + public static KeySerializer getStringKeySerializer() { + return StringKeySerializer.INSTANCE; + } + + /** + * Returns a map key serializer for Integer keys. Encodes as Int. + */ + public static KeySerializer getIntKeySerializer() { + return IntKeySerializer.INSTANCE; + } + + /** + * Returns a map key serializer for Integer keys. Encodes as VInt. 
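+     * Note that {@code CodedOutputStream#writeInt32NoTag} already emits protobuf varints, so both integer
+     * key serializers produce variable-length output; this variant additionally rejects negative keys.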
+ */ + public static KeySerializer getVIntKeySerializer() { + return VIntKeySerializer.INSTANCE; + } + + /** + * Calculates diff between two ImmutableOpenMaps of Diffable objects + */ + public static > MapDiff> diff( + ImmutableOpenMap before, + ImmutableOpenMap after, + KeySerializer keySerializer + ) { + assert after != null && before != null; + return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); + } + + /** + * Calculates diff between two ImmutableOpenMaps of non-diffable objects + */ + public static MapDiff> diff( + ImmutableOpenMap before, + ImmutableOpenMap after, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) { + assert after != null && before != null; + return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer); + } + + /** + * Calculates diff between two ImmutableOpenIntMaps of Diffable objects + */ + public static > MapDiff> diff( + ImmutableOpenIntMap before, + ImmutableOpenIntMap after, + KeySerializer keySerializer + ) { + assert after != null && before != null; + return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); + } + + /** + * Calculates diff between two ImmutableOpenIntMaps of non-diffable objects + */ + public static MapDiff> diff( + ImmutableOpenIntMap before, + ImmutableOpenIntMap after, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) { + assert after != null && before != null; + return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer); + } + + /** + * Calculates diff between two Maps of Diffable objects. + */ + public static > MapDiff> diff( + Map before, + Map after, + KeySerializer keySerializer + ) { + assert after != null && before != null; + return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); + } + + /** + * Calculates diff between two Maps of non-diffable objects + */ + public static MapDiff> diff( + Map before, + Map after, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) { + assert after != null && before != null; + return new JdkMapDiff<>(before, after, keySerializer, valueSerializer); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static MapDiff> readImmutableOpenMapDiff( + CodedInputStream in, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) throws IOException { + return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static MapDiff> readImmutableOpenIntMapDiff( + CodedInputStream in, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) throws IOException { + return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer); + } + + /** + * Loads an object that represents difference between two Maps of Diffable objects + */ + public static MapDiff> readJdkMapDiff( + CodedInputStream in, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) throws IOException { + return new JdkMapDiff<>(in, keySerializer, valueSerializer); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object + */ + public static > MapDiff> readImmutableOpenMapDiff( + CodedInputStream in, + KeySerializer keySerializer, + DiffableValueReader diffableValueReader + ) throws IOException { + return new 
ImmutableOpenMapDiff<>(in, keySerializer, diffableValueReader); + } + + /** + * Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object + */ + public static > MapDiff> readImmutableOpenIntMapDiff( + CodedInputStream in, + KeySerializer keySerializer, + ProtobufWriteable.Reader reader, + ProtobufWriteable.Reader> diffReader + ) throws IOException { + return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); + } + + /** + * Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object + */ + public static > MapDiff> readJdkMapDiff( + CodedInputStream in, + KeySerializer keySerializer, + ProtobufWriteable.Reader reader, + ProtobufWriteable.Reader> diffReader + ) throws IOException { + return new JdkMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); + } + + /** + * Represents differences between two Maps of (possibly diffable) objects. + * + * @param the diffable object + * + * @opensearch.internal + */ + private static class JdkMapDiff extends MapDiff> { + + protected JdkMapDiff(CodedInputStream in, KeySerializer keySerializer, ValueSerializer valueSerializer) + throws IOException { + super(in, keySerializer, valueSerializer); + } + + JdkMapDiff(Map before, Map after, KeySerializer keySerializer, ValueSerializer valueSerializer) { + super(keySerializer, valueSerializer); + assert after != null && before != null; + + for (K key : before.keySet()) { + if (!after.containsKey(key)) { + deletes.add(key); + } + } + + for (Map.Entry partIter : after.entrySet()) { + T beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + upserts.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + if (valueSerializer.supportsDiffableValues()) { + diffs.put(partIter.getKey(), valueSerializer.diff(partIter.getValue(), beforePart)); + } else { + upserts.put(partIter.getKey(), partIter.getValue()); + } + } + } + } + + @Override + public Map apply(Map map) { + Map builder = new HashMap<>(map); + + for (K part : deletes) { + builder.remove(part); + } + + for (Map.Entry> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry upsert : upserts.entrySet()) { + builder.put(upsert.getKey(), upsert.getValue()); + } + return builder; + } + } + + /** + * Represents differences between two ImmutableOpenMap of (possibly diffable) objects + * + * @param the object type + * + * @opensearch.internal + */ + public static class ImmutableOpenMapDiff extends MapDiff> { + + protected ImmutableOpenMapDiff(CodedInputStream in, KeySerializer keySerializer, ValueSerializer valueSerializer) + throws IOException { + super(in, keySerializer, valueSerializer); + } + + private ImmutableOpenMapDiff( + KeySerializer keySerializer, + ValueSerializer valueSerializer, + List deletes, + Map> diffs, + Map upserts + ) { + super(keySerializer, valueSerializer, deletes, diffs, upserts); + } + + public ImmutableOpenMapDiff( + ImmutableOpenMap before, + ImmutableOpenMap after, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) { + super(keySerializer, valueSerializer); + assert after != null && before != null; + + for (ObjectCursor key : before.keys()) { + if (!after.containsKey(key.value)) { + deletes.add(key.value); + } + } + + for (ObjectObjectCursor partIter : after) { + T beforePart = 
before.get(partIter.key); + if (beforePart == null) { + upserts.put(partIter.key, partIter.value); + } else if (partIter.value.equals(beforePart) == false) { + if (valueSerializer.supportsDiffableValues()) { + diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart)); + } else { + upserts.put(partIter.key, partIter.value); + } + } + } + } + + /** + * Returns a new diff map with the given key removed, does not modify the invoking instance. + * If the key does not exist in the diff map, the same instance is returned. + */ + public ImmutableOpenMapDiff withKeyRemoved(K key) { + if (this.diffs.containsKey(key) == false && this.upserts.containsKey(key) == false) { + return this; + } + Map> newDiffs = new HashMap<>(this.diffs); + newDiffs.remove(key); + Map newUpserts = new HashMap<>(this.upserts); + newUpserts.remove(key); + return new ImmutableOpenMapDiff<>(this.keySerializer, this.valueSerializer, this.deletes, newDiffs, newUpserts); + } + + @Override + public ImmutableOpenMap apply(ImmutableOpenMap map) { + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); + builder.putAll(map); + + for (K part : deletes) { + builder.remove(part); + } + + for (Map.Entry> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry upsert : upserts.entrySet()) { + builder.put(upsert.getKey(), upsert.getValue()); + } + return builder.build(); + } + } + + /** + * Represents differences between two ImmutableOpenIntMap of (possibly diffable) objects + * + * @param the object type + * + * @opensearch.internal + */ + private static class ImmutableOpenIntMapDiff extends MapDiff> { + + protected ImmutableOpenIntMapDiff( + CodedInputStream in, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) throws IOException { + super(in, keySerializer, valueSerializer); + } + + ImmutableOpenIntMapDiff( + ImmutableOpenIntMap before, + ImmutableOpenIntMap after, + KeySerializer keySerializer, + ValueSerializer valueSerializer + ) { + super(keySerializer, valueSerializer); + assert after != null && before != null; + + for (IntCursor key : before.keys()) { + if (!after.containsKey(key.value)) { + deletes.add(key.value); + } + } + + for (IntObjectCursor partIter : after) { + T beforePart = before.get(partIter.key); + if (beforePart == null) { + upserts.put(partIter.key, partIter.value); + } else if (partIter.value.equals(beforePart) == false) { + if (valueSerializer.supportsDiffableValues()) { + diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart)); + } else { + upserts.put(partIter.key, partIter.value); + } + } + } + } + + @Override + public ImmutableOpenIntMap apply(ImmutableOpenIntMap map) { + ImmutableOpenIntMap.Builder builder = ImmutableOpenIntMap.builder(); + builder.putAll(map); + + for (Integer part : deletes) { + builder.remove(part); + } + + for (Map.Entry> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry upsert : upserts.entrySet()) { + builder.put(upsert.getKey(), upsert.getValue()); + } + return builder.build(); + } + } + + /** + * Represents differences between two maps of objects and is used as base class for different map implementations. + * + * Implements serialization. How differences are applied is left to subclasses. 
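+     * <p>
+     * Every subclass applies a diff with the same three steps, sketched here against a plain mutable map
+     * (illustrative only):
+     * <pre>{@code
+     * deletes.forEach(map::remove);                              // drop removed keys
+     * diffs.forEach((k, d) -> map.put(k, d.apply(map.get(k))));  // transform surviving values
+     * upserts.forEach(map::put);                                 // add or fully replace
+     * }</pre>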
+ * + * @param the type of map keys + * @param the type of map values + * @param the map implementation type + * + * @opensearch.internal + */ + public abstract static class MapDiff implements ProtobufDiff { + + protected final List deletes; + protected final Map> diffs; // incremental updates + protected final Map upserts; // additions or full updates + protected final KeySerializer keySerializer; + protected final ValueSerializer valueSerializer; + + protected MapDiff(KeySerializer keySerializer, ValueSerializer valueSerializer) { + this.keySerializer = keySerializer; + this.valueSerializer = valueSerializer; + deletes = new ArrayList<>(); + diffs = new HashMap<>(); + upserts = new HashMap<>(); + } + + protected MapDiff( + KeySerializer keySerializer, + ValueSerializer valueSerializer, + List deletes, + Map> diffs, + Map upserts + ) { + this.keySerializer = keySerializer; + this.valueSerializer = valueSerializer; + this.deletes = deletes; + this.diffs = diffs; + this.upserts = upserts; + } + + protected MapDiff(CodedInputStream in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + this.keySerializer = keySerializer; + this.valueSerializer = valueSerializer; + deletes = protobufStreamInput.readList(keySerializer::readKey, in); + int diffsCount = protobufStreamInput.readVInt(in); + diffs = diffsCount == 0 ? Collections.emptyMap() : new HashMap<>(diffsCount); + for (int i = 0; i < diffsCount; i++) { + K key = keySerializer.readKey(in); + ProtobufDiff diff = valueSerializer.readDiff(in, key); + diffs.put(key, diff); + } + int upsertsCount = protobufStreamInput.readVInt(in); + upserts = upsertsCount == 0 ? Collections.emptyMap() : new HashMap<>(upsertsCount); + for (int i = 0; i < upsertsCount; i++) { + K key = keySerializer.readKey(in); + T newValue = valueSerializer.read(in, key); + upserts.put(key, newValue); + } + } + + /** + * The keys that, when this diff is applied to a map, should be removed from the map. + * + * @return the list of keys that are deleted + */ + public List getDeletes() { + return deletes; + } + + /** + * Map entries that, when this diff is applied to a map, should be + * incrementally updated. The incremental update is represented using + * the {@link Diff} interface. + * + * @return the map entries that are incrementally updated + */ + public Map> getDiffs() { + return diffs; + } + + /** + * Map entries that, when this diff is applied to a map, should be + * added to the map or fully replace the previous value. 
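+         * An entry lands here either because its key is new or because the value serializer cannot express
+         * incremental diffs (see {@code ValueSerializer#supportsDiffableValues()}).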
+         *
+         * @return the map entries that are additions or full updates
+         */
+        public Map<K, T> getUpserts() {
+            return upserts;
+        }
+
+        @Override
+        public void writeTo(CodedOutputStream out) throws IOException {
+            ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput();
+            protobufStreamOutput.writeCollection(deletes, (o, v) -> keySerializer.writeKey(v, o), out);
+            Version version = protobufStreamOutput.getVersion();
+            // filter out custom states not supported by the other node
+            int diffCount = 0;
+            for (ProtobufDiff<T> diff : diffs.values()) {
+                if (valueSerializer.supportsVersion(diff, version)) {
+                    diffCount++;
+                }
+            }
+            out.writeInt32NoTag(diffCount);
+            for (Map.Entry<K, ProtobufDiff<T>> entry : diffs.entrySet()) {
+                if (valueSerializer.supportsVersion(entry.getValue(), version)) {
+                    keySerializer.writeKey(entry.getKey(), out);
+                    valueSerializer.writeDiff(entry.getValue(), out);
+                }
+            }
+            // filter out custom states not supported by the other node
+            int upsertsCount = 0;
+            for (T upsert : upserts.values()) {
+                if (valueSerializer.supportsVersion(upsert, version)) {
+                    upsertsCount++;
+                }
+            }
+            out.writeInt32NoTag(upsertsCount);
+            for (Map.Entry<K, T> entry : upserts.entrySet()) {
+                if (valueSerializer.supportsVersion(entry.getValue(), version)) {
+                    keySerializer.writeKey(entry.getKey(), out);
+                    valueSerializer.write(entry.getValue(), out);
+                }
+            }
+        }
+    }
+
+    /**
+     * Provides read and write operations to serialize keys of map
+     * @param <K> type of key
+     *
+     * @opensearch.internal
+     */
+    public interface KeySerializer<K> {
+        void writeKey(K key, CodedOutputStream out) throws IOException;
+
+        K readKey(CodedInputStream in) throws IOException;
+    }
+
+    /**
+     * Serializes String keys of a map
+     *
+     * @opensearch.internal
+     */
+    private static final class StringKeySerializer implements KeySerializer<String> {
+        private static final StringKeySerializer INSTANCE = new StringKeySerializer();
+
+        @Override
+        public void writeKey(String key, CodedOutputStream out) throws IOException {
+            out.writeStringNoTag(key);
+        }
+
+        @Override
+        public String readKey(CodedInputStream in) throws IOException {
+            return in.readString();
+        }
+    }
+
+    /**
+     * Serializes Integer keys of a map as an Int
+     *
+     * @opensearch.internal
+     */
+    private static final class IntKeySerializer implements KeySerializer<Integer> {
+        public static final IntKeySerializer INSTANCE = new IntKeySerializer();
+
+        @Override
+        public void writeKey(Integer key, CodedOutputStream out) throws IOException {
+            out.writeInt32NoTag(key);
+        }
+
+        @Override
+        public Integer readKey(CodedInputStream in) throws IOException {
+            return in.readInt32();
+        }
+    }
+
+    /**
+     * Serializes Integer keys of a map as a VInt. Requires keys to be positive.
+     *
+     * @opensearch.internal
+     */
+    private static final class VIntKeySerializer implements KeySerializer<Integer> {
+        // must be an instance of this class so the positive-key check in writeKey actually runs
+        public static final VIntKeySerializer INSTANCE = new VIntKeySerializer();
+
+        @Override
+        public void writeKey(Integer key, CodedOutputStream out) throws IOException {
+            if (key < 0) {
+                throw new IllegalArgumentException("Map key [" + key + "] must be positive");
+            }
+            out.writeInt32NoTag(key);
+        }
+
+        @Override
+        public Integer readKey(CodedInputStream in) throws IOException {
+            return in.readInt32();
+        }
+    }
+
+    /**
+     * Provides read and write operations to serialize map values.
+     * Reading of values can be made dependent on map key.
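+     * For example, named customs are read this way: the map key carries the writeable name that selects
+     * the concrete reader (see {@code ProtobufNamedDiffableValueSerializer}).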
+ * + * Should not be directly implemented, instead implement either + * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}. + * + * @param key type of map + * @param value type of map + * + * @opensearch.internal + */ + public interface ValueSerializer { + + /** + * Writes value to stream + */ + void write(V value, CodedOutputStream out) throws IOException; + + /** + * Reads value from stream. Reading operation can be made dependent on map key. + */ + V read(CodedInputStream in, K key) throws IOException; + + /** + * Whether this serializer supports diffable values + */ + boolean supportsDiffableValues(); + + /** + * Whether this serializer supports the version of the output stream + */ + default boolean supportsVersion(ProtobufDiff value, Version version) { + return true; + } + + /** + * Whether this serializer supports the version of the output stream + */ + default boolean supportsVersion(V value, Version version) { + return true; + } + + /** + * Computes diff if this serializer supports diffable values + */ + ProtobufDiff diff(V value, V beforePart); + + /** + * Writes value as diff to stream if this serializer supports diffable values + */ + void writeDiff(ProtobufDiff value, CodedOutputStream out) throws IOException; + + /** + * Reads value as diff from stream if this serializer supports diffable values. + * Reading operation can be made dependent on map key. + */ + ProtobufDiff readDiff(CodedInputStream in, K key) throws IOException; + } + + /** + * Serializer for Diffable map values. Needs to implement read and readDiff methods. + * + * @param type of map keys + * @param type of map values + * + * @opensearch.internal + */ + public abstract static class DiffableValueSerializer> implements ValueSerializer { + private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() { + @Override + public Object read(CodedInputStream in, Object key) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public ProtobufDiff readDiff(CodedInputStream in, Object key) throws IOException { + throw new UnsupportedOperationException(); + } + }; + + private static > DiffableValueSerializer getWriteOnlyInstance() { + return WRITE_ONLY_INSTANCE; + } + + @Override + public boolean supportsDiffableValues() { + return true; + } + + @Override + public ProtobufDiff diff(V value, V beforePart) { + return value.diff(beforePart); + } + + @Override + public void write(V value, CodedOutputStream out) throws IOException { + value.writeTo(out); + } + + public void writeDiff(ProtobufDiff value, CodedOutputStream out) throws IOException { + value.writeTo(out); + } + } + + /** + * Serializer for non-diffable map values + * + * @param type of map keys + * @param type of map values + * + * @opensearch.internal + */ + public abstract static class NonDiffableValueSerializer implements ValueSerializer { + @Override + public boolean supportsDiffableValues() { + return false; + } + + @Override + public ProtobufDiff diff(V value, V beforePart) { + throw new UnsupportedOperationException(); + } + + @Override + public void writeDiff(ProtobufDiff value, CodedOutputStream out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public ProtobufDiff readDiff(CodedInputStream in, K key) throws IOException { + throw new UnsupportedOperationException(); + } + } + + /** + * Implementation of the ValueSerializer that wraps value and diff readers. + * + * Note: this implementation is ignoring the key. 
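+     * <p>
+     * An illustrative sketch ({@code MyPart} is a made-up diffable part with the usual stream constructor
+     * and static diff reader):
+     * <pre>{@code
+     * DiffableValueReader<String, MyPart> reader = new DiffableValueReader<>(MyPart::new, MyPart::readDiffFrom);
+     * }</pre>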
+     *
+     * @opensearch.internal
+     */
+    public static class DiffableValueReader<K, V extends ProtobufDiffable<V>> extends DiffableValueSerializer<K, V> {
+        private final ProtobufWriteable.Reader<V> reader;
+        private final ProtobufWriteable.Reader<ProtobufDiff<V>> diffReader;
+
+        public DiffableValueReader(ProtobufWriteable.Reader<V> reader, ProtobufWriteable.Reader<ProtobufDiff<V>> diffReader) {
+            this.reader = reader;
+            this.diffReader = diffReader;
+        }
+
+        @Override
+        public V read(CodedInputStream in, K key) throws IOException {
+            return reader.read(in);
+        }
+
+        @Override
+        public ProtobufDiff<V> readDiff(CodedInputStream in, K key) throws IOException {
+            return diffReader.read(in);
+        }
+    }
+
+    /**
+     * Implementation of ValueSerializer that serializes immutable sets
+     *
+     * @param <K> type of map key
+     *
+     * @opensearch.internal
+     */
+    public static class StringSetValueSerializer<K> extends NonDiffableValueSerializer<K, Set<String>> {
+        private static final StringSetValueSerializer INSTANCE = new StringSetValueSerializer();
+
+        public static <K> StringSetValueSerializer<K> getInstance() {
+            return INSTANCE;
+        }
+
+        @Override
+        public void write(Set<String> value, CodedOutputStream out) throws IOException {
+            ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput();
+            protobufStreamOutput.writeCollection(value, CodedOutputStream::writeStringNoTag, out);
+        }
+
+        @Override
+        public Set<String> read(CodedInputStream in, K key) throws IOException {
+            ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput();
+            return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(protobufStreamInput.readStringArray(in))));
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiff.java b/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiff.java
new file mode 100644
index 0000000000000..85f6e29d31458
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiff.java
@@ -0,0 +1,32 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/
+
+package org.opensearch.cluster;
+
+import org.opensearch.Version;
+import org.opensearch.common.io.stream.ProtobufNamedWriteable;
+
+/**
+ * Diff that also supports the {@link ProtobufNamedWriteable} interface
+ *
+ * @opensearch.internal
+ */
+public interface ProtobufNamedDiff<T extends ProtobufDiffable<T>> extends ProtobufDiff<T>, ProtobufNamedWriteable {
+    /**
+     * The minimal version of the recipient this custom object can be sent to
+     */
+    default Version getMinimalSupportedVersion() {
+        return Version.CURRENT.minimumIndexCompatibilityVersion();
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffable.java b/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffable.java
new file mode 100644
index 0000000000000..4c1ff98666e5f
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffable.java
@@ -0,0 +1,24 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/
+
+package org.opensearch.cluster;
+
+import org.opensearch.common.io.stream.ProtobufVersionedNamedWriteable;
+import org.opensearch.common.io.stream.VersionedNamedWriteable;
+
+/**
+ * Diff that also supports the {@link VersionedNamedWriteable} interface
+ *
+ * @opensearch.internal
+ */
+public interface ProtobufNamedDiffable<T> extends ProtobufDiffable<T>, ProtobufVersionedNamedWriteable {}
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffableValueSerializer.java b/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffableValueSerializer.java
new file mode 100644
index 0000000000000..e7b3720256b09
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufNamedDiffableValueSerializer.java
@@ -0,0 +1,59 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/
+
+package org.opensearch.cluster;
+
+import com.google.protobuf.CodedInputStream;
+import org.opensearch.Version;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+
+import java.io.IOException;
+
+/**
+ * Value Serializer for named diffables
+ *
+ * @opensearch.internal
+ */
+public class ProtobufNamedDiffableValueSerializer<T extends ProtobufNamedDiffable<T>> extends ProtobufDiffableUtils.DiffableValueSerializer<
+    String,
+    T> {
+
+    private final Class<T> tClass;
+
+    public ProtobufNamedDiffableValueSerializer(Class<T> tClass) {
+        this.tClass = tClass;
+    }
+
+    @Override
+    public T read(CodedInputStream in, String key) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput();
+        return protobufStreamInput.readNamedWriteable(tClass, key);
+    }
+
+    @Override
+    public boolean supportsVersion(ProtobufDiff<T> value, Version version) {
+        return version.onOrAfter(((ProtobufNamedDiff<?>) value).getMinimalSupportedVersion());
+    }
+
+    @Override
+    public boolean supportsVersion(T value, Version version) {
+        return version.onOrAfter(value.getMinimalSupportedVersion());
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public ProtobufDiff<T> readDiff(CodedInputStream in, String key) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput();
+        return protobufStreamInput.readNamedWriteable(ProtobufNamedDiff.class, key);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNode.java
new file mode 100644
index 0000000000000..11b1762b05108
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNode.java
@@ -0,0 +1,518 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/ + +package org.opensearch.cluster.node; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.Version; +import org.opensearch.common.UUIDs; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.node.Node; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING; + +/** + * A discovery node represents a node that is part of the cluster. +* +* @opensearch.internal +*/ +public class ProtobufDiscoveryNode implements ProtobufWriteable { + + static final String COORDINATING_ONLY = "coordinating_only"; + + public static boolean nodeRequiresLocalStorage(Settings settings) { + boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings); + if (localStorageEnable == false && (isDataNode(settings) || isClusterManagerNode(settings))) { + // TODO: make this a proper setting validation logic, requiring multi-settings validation + throw new IllegalArgumentException("storage can not be disabled for cluster-manager and data nodes"); + } + return localStorageEnable; + } + + public static boolean hasRole(final Settings settings, final DiscoveryNodeRole role) { + /* + * This method can be called before the o.e.n.NodeRoleSettings.NODE_ROLES_SETTING is initialized. We do not want to trigger + * initialization prematurely because that will bake the default roles before plugins have had a chance to register them. Therefore, + * to avoid initializing this setting prematurely, we avoid using the actual node roles setting instance here. + */ + if (settings.hasValue("node.roles")) { + return settings.getAsList("node.roles").contains(role.roleName()); + } else if (role.legacySetting() != null && settings.hasValue(role.legacySetting().getKey())) { + return role.legacySetting().get(settings); + } else { + return role.isEnabledByDefault(settings); + } + } + + public static boolean isClusterManagerNode(Settings settings) { + return hasRole(settings, DiscoveryNodeRole.MASTER_ROLE) || hasRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerNode(Settings)} */ + @Deprecated + public static boolean isMasterNode(Settings settings) { + return isClusterManagerNode(settings); + } + + /** + * Due to the way that plugins may not be available when settings are being initialized, + * not all roles may be available from a static/initializing context such as a {@link Setting} + * default value function. In that case, be warned that this may not include all plugin roles. 
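+     *
+     * For example (the settings values are illustrative):
+     * <pre><code>
+     * Settings settings = Settings.builder().putList("node.roles", "data", "ingest").build();
+     * boolean isData = ProtobufDiscoveryNode.isDataNode(settings); // true
+     * </code></pre>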
+ */ + public static boolean isDataNode(final Settings settings) { + return getRolesFromSettings(settings).stream().anyMatch(DiscoveryNodeRole::canContainData); + } + + public static boolean isIngestNode(Settings settings) { + return hasRole(settings, DiscoveryNodeRole.INGEST_ROLE); + } + + public static boolean isRemoteClusterClient(final Settings settings) { + return hasRole(settings, DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); + } + + public static boolean isSearchNode(Settings settings) { + return hasRole(settings, DiscoveryNodeRole.SEARCH_ROLE); + } + + private final String nodeName; + private final String nodeId; + private final String ephemeralId; + private final String hostName; + private final String hostAddress; + private final ProtobufTransportAddress address; + private final Map attributes; + private final Version version; + private final SortedSet roles; + + /** + * Creates a new {@link ProtobufDiscoveryNode} + *
+ * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current + * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used + * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered + * and updated. + *
+ * + * @param id the nodes unique (persistent) node id. This constructor will auto generate a random ephemeral id. + * @param address the nodes transport address + * @param version the version of the node + */ + public ProtobufDiscoveryNode(final String id, ProtobufTransportAddress address, Version version) { + this(id, address, Collections.emptyMap(), DiscoveryNodeRole.BUILT_IN_ROLES, version); + } + + /** + * Creates a new {@link ProtobufDiscoveryNode} + *
+ * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current + * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used + * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered + * and updated. + *
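+     * For example (values illustrative; this assumes {@link ProtobufTransportAddress} offers the
+     * usual InetAddress/port constructor):
+     * <pre><code>
+     * ProtobufDiscoveryNode node = new ProtobufDiscoveryNode(
+     *     "node-id-1",
+     *     new ProtobufTransportAddress(InetAddress.getLoopbackAddress(), 9300),
+     *     Collections.emptyMap(),
+     *     DiscoveryNodeRole.BUILT_IN_ROLES,
+     *     Version.CURRENT
+     * );
+     * </code></pre>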
+ * + * @param id the nodes unique (persistent) node id. This constructor will auto generate a random ephemeral id. + * @param address the nodes transport address + * @param attributes node attributes + * @param roles node roles + * @param version the version of the node + */ + public ProtobufDiscoveryNode( + String id, + ProtobufTransportAddress address, + Map attributes, + Set roles, + Version version + ) { + this("", id, address, attributes, roles, version); + } + + /** + * Creates a new {@link ProtobufDiscoveryNode} + *
+ * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current + * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used + * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered + * and updated. + *
+ * + * @param nodeName the nodes name + * @param nodeId the nodes unique persistent id. An ephemeral id will be auto generated. + * @param address the nodes transport address + * @param attributes node attributes + * @param roles node roles + * @param version the version of the node + */ + public ProtobufDiscoveryNode( + String nodeName, + String nodeId, + ProtobufTransportAddress address, + Map attributes, + Set roles, + Version version + ) { + this( + nodeName, + nodeId, + UUIDs.randomBase64UUID(), + address.address().getHostString(), + address.getAddress(), + address, + attributes, + roles, + version + ); + } + + /** + * Creates a new {@link ProtobufDiscoveryNode}. + *
+ * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current + * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used + * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered + * and updated. + *
+ * + * @param nodeName the nodes name + * @param nodeId the nodes unique persistent id + * @param ephemeralId the nodes unique ephemeral id + * @param hostAddress the nodes host address + * @param address the nodes transport address + * @param attributes node attributes + * @param roles node roles + * @param version the version of the node + */ + public ProtobufDiscoveryNode( + String nodeName, + String nodeId, + String ephemeralId, + String hostName, + String hostAddress, + ProtobufTransportAddress address, + Map attributes, + Set roles, + Version version + ) { + if (nodeName != null) { + this.nodeName = nodeName.intern(); + } else { + this.nodeName = ""; + } + this.nodeId = nodeId.intern(); + this.ephemeralId = ephemeralId.intern(); + this.hostName = hostName.intern(); + this.hostAddress = hostAddress.intern(); + this.address = address; + if (version == null) { + this.version = Version.CURRENT; + } else { + this.version = version; + } + this.attributes = Collections.unmodifiableMap(attributes); + // verify that no node roles are being provided as attributes + Predicate> predicate = (attrs) -> { + boolean success = true; + for (final DiscoveryNodeRole role : ProtobufDiscoveryNode.roleMap.values()) { + success &= attrs.containsKey(role.roleName()) == false; + assert success : role.roleName(); + } + return success; + }; + assert predicate.test(attributes) : attributes; + this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles)); + } + + /** Creates a ProtobufDiscoveryNode representing the local node. */ + public static ProtobufDiscoveryNode createLocal(Settings settings, ProtobufTransportAddress publishAddress, String nodeId) { + Map attributes = Node.NODE_ATTRIBUTES.getAsMap(settings); + Set roles = getRolesFromSettings(settings); + return new ProtobufDiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, roles, Version.CURRENT); + } + + /** extract node roles from the given settings */ + public static Set getRolesFromSettings(final Settings settings) { + if (NODE_ROLES_SETTING.exists(settings)) { + validateLegacySettings(settings, roleMap); + return Collections.unmodifiableSet(new HashSet<>(NODE_ROLES_SETTING.get(settings))); + } else { + return roleMap.values().stream().filter(s -> s.isEnabledByDefault(settings)).collect(Collectors.toSet()); + } + } + + private static void validateLegacySettings(final Settings settings, final Map roleMap) { + for (final DiscoveryNodeRole role : roleMap.values()) { + if (role.legacySetting() != null && role.legacySetting().exists(settings)) { + final String message = String.format( + Locale.ROOT, + "can not explicitly configure node roles and use legacy role setting [%s]=[%s]", + role.legacySetting().getKey(), + role.legacySetting().get(settings) + ); + throw new IllegalArgumentException(message); + } + } + } + + /** + * Creates a new {@link ProtobufDiscoveryNode} by reading from the stream provided as argument + * @param in the stream + * @throws IOException if there is an error while reading from the stream + */ + public ProtobufDiscoveryNode(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + this.nodeName = in.readString(); + this.nodeId = in.readString(); + this.ephemeralId = in.readString(); + this.hostName = in.readString(); + this.hostAddress = in.readString(); + this.address = new ProtobufTransportAddress(in); + int size = in.readInt32(); + this.attributes = new HashMap<>(size); + for (int i = 0; i < size; i++) { + 
this.attributes.put(in.readString(), in.readString()); + } + int rolesSize = in.readInt32(); + final Set roles = new HashSet<>(rolesSize); + for (int i = 0; i < rolesSize; i++) { + final String roleName = in.readString(); + final String roleNameAbbreviation = in.readString(); + final boolean canContainData = in.readBool(); + final DiscoveryNodeRole role = roleMap.get(roleName); + if (role == null) { + if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_1_0)) { + roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); + } else { + roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); + } + } else { + assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; + assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" + + roleName + + "] does not match role [" + + role.roleNameAbbreviation() + + "]"; + roles.add(role); + } + } + this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles)); + this.version = Version.readVersionProtobuf(in); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + out.writeStringNoTag(nodeName); + out.writeStringNoTag(nodeId); + out.writeStringNoTag(ephemeralId); + out.writeStringNoTag(hostName); + out.writeStringNoTag(hostAddress); + address.writeTo(out); + out.writeInt32NoTag(attributes.size()); + for (Map.Entry entry : attributes.entrySet()) { + out.writeStringNoTag(entry.getKey()); + out.writeStringNoTag(entry.getValue()); + } + out.writeInt32NoTag(roles.size()); + for (final DiscoveryNodeRole role : roles) { + final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(protobufStreamOutput.getVersion()); + out.writeStringNoTag(compatibleRole.roleName()); + out.writeStringNoTag(compatibleRole.roleNameAbbreviation()); + out.writeBoolNoTag(compatibleRole.canContainData()); + } + out.writeInt32NoTag(version.id); + } + + /** + * The address that the node can be communicated with. + */ + public ProtobufTransportAddress getAddress() { + return address; + } + + /** + * The unique id of the node. + */ + public String getId() { + return nodeId; + } + + /** + * The unique ephemeral id of the node. Ephemeral ids are meant to be attached the life span + * of a node process. When ever a node is restarted, it's ephemeral id is required to change (while it's {@link #getId()} + * will be read from the data folder and will remain the same across restarts). + */ + public String getEphemeralId() { + return ephemeralId; + } + + /** + * The name of the node. + */ + public String getName() { + return this.nodeName; + } + + /** + * The node attributes. + */ + public Map getAttributes() { + return this.attributes; + } + + /** + * Should this node hold data (shards) or not. + */ + public boolean isDataNode() { + return roles.stream().anyMatch(DiscoveryNodeRole::canContainData); + } + + /** + * Can this node become cluster-manager or not. + */ + public boolean isClusterManagerNode() { + return roles.contains(DiscoveryNodeRole.MASTER_ROLE) || roles.contains(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); + } + + /** + * Can this node become cluster-manager or not. 
+ * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerNode()} + */ + @Deprecated + public boolean isMasterNode() { + return isClusterManagerNode(); + } + + /** + * Returns a boolean that tells whether this an ingest node or not + */ + public boolean isIngestNode() { + return roles.contains(DiscoveryNodeRole.INGEST_ROLE); + } + + /** + * Returns whether or not the node can be a remote cluster client. + * + * @return true if the node can be a remote cluster client, false otherwise + */ + public boolean isRemoteClusterClient() { + return roles.contains(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); + } + + /** + * Returns whether the node is dedicated to provide search capability. + * + * @return true if the node contains search role, false otherwise + */ + public boolean isSearchNode() { + return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); + } + + /** + * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. + *
+ * If a node does not have any specific role, the returned set is empty, which means that the node is a coordinating-only node. + * + * @return the sorted set of roles + */ + public Set getRoles() { + return roles; + } + + public Version getVersion() { + return this.version; + } + + public String getHostName() { + return this.hostName; + } + + public String getHostAddress() { + return this.hostAddress; + } + + private static Map rolesToMap(final Stream roles) { + return Collections.unmodifiableMap(roles.collect(Collectors.toMap(DiscoveryNodeRole::roleName, Function.identity()))); + } + + private static Map roleMap = rolesToMap(DiscoveryNodeRole.BUILT_IN_ROLES.stream()); + + public static DiscoveryNodeRole getRoleFromRoleName(final String roleName) { + // As we are supporting dynamic role, should make role name case-insensitive to avoid confusion of role name like "Data"/"DATA" + String lowerCasedRoleName = Objects.requireNonNull(roleName).toLowerCase(Locale.ROOT); + if (roleMap.containsKey(lowerCasedRoleName)) { + return roleMap.get(lowerCasedRoleName); + } + return new DiscoveryNodeRole.DynamicRole(lowerCasedRoleName, lowerCasedRoleName, false); + } + + public static Set getPossibleRoles() { + return Collections.unmodifiableSet(new HashSet<>(roleMap.values())); + } + + public static void setAdditionalRoles(final Set additionalRoles) { + assert additionalRoles.stream().allMatch(r -> r.legacySetting() == null || r.legacySetting().isDeprecated()) : additionalRoles; + final Map roleNameToPossibleRoles = rolesToMap( + Stream.concat(DiscoveryNodeRole.BUILT_IN_ROLES.stream(), additionalRoles.stream()) + ); + // collect the abbreviation names into a map to ensure that there are not any duplicate abbreviations + final Map roleNameAbbreviationToPossibleRoles = Collections.unmodifiableMap( + roleNameToPossibleRoles.values() + .stream() + .collect(Collectors.toMap(DiscoveryNodeRole::roleNameAbbreviation, Function.identity())) + ); + assert roleNameToPossibleRoles.size() == roleNameAbbreviationToPossibleRoles.size() : "roles by name [" + + roleNameToPossibleRoles + + "], roles by name abbreviation [" + + roleNameAbbreviationToPossibleRoles + + "]"; + roleMap = roleNameToPossibleRoles; + } + + /** + * Load the deprecated {@link DiscoveryNodeRole#MASTER_ROLE}. + * Master role is not added into BUILT_IN_ROLES, because {@link #setAdditionalRoles(Set)} check role name abbreviation duplication, + * and CLUSTER_MANAGER_ROLE has the same abbreviation name with MASTER_ROLE. + */ + public static void setDeprecatedMasterRole() { + final Map modifiableRoleMap = new HashMap<>(roleMap); + modifiableRoleMap.put(DiscoveryNodeRole.MASTER_ROLE.roleName(), DiscoveryNodeRole.MASTER_ROLE); + roleMap = Collections.unmodifiableMap(modifiableRoleMap); + } + + public static Set getPossibleRoleNames() { + return roleMap.keySet(); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java new file mode 100644 index 0000000000000..d6a19ef75a134 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java @@ -0,0 +1,886 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.cluster.node; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import com.carrotsearch.hppc.ObjectHashSet; +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.opensearch.Version; +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.cluster.Diff; +import org.opensearch.common.Booleans; +import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; +import org.opensearch.common.collect.ImmutableOpenMap; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.regex.Regex; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.util.set.Sets; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +/** + * This class holds all {@link ProtobufDiscoveryNode} in the cluster and provides convenience methods to +* access, modify merge / diff discovery nodes. +* +* @opensearch.internal +*/ +public class ProtobufDiscoveryNodes extends AbstractDiffable implements Iterable { + + public static final ProtobufDiscoveryNodes EMPTY_NODES = builder().build(); + + private final ImmutableOpenMap nodes; + private final ImmutableOpenMap dataNodes; + private final ImmutableOpenMap clusterManagerNodes; + private final ImmutableOpenMap ingestNodes; + + private final String clusterManagerNodeId; + private final String localNodeId; + private final Version minNonClientNodeVersion; + private final Version maxNonClientNodeVersion; + private final Version maxNodeVersion; + private final Version minNodeVersion; + + private ProtobufDiscoveryNodes( + ImmutableOpenMap nodes, + ImmutableOpenMap dataNodes, + ImmutableOpenMap clusterManagerNodes, + ImmutableOpenMap ingestNodes, + String clusterManagerNodeId, + String localNodeId, + Version minNonClientNodeVersion, + Version maxNonClientNodeVersion, + Version maxNodeVersion, + Version minNodeVersion + ) { + this.nodes = nodes; + this.dataNodes = dataNodes; + this.clusterManagerNodes = clusterManagerNodes; + this.ingestNodes = ingestNodes; + this.clusterManagerNodeId = clusterManagerNodeId; + this.localNodeId = localNodeId; + this.minNonClientNodeVersion = minNonClientNodeVersion; + this.maxNonClientNodeVersion = maxNonClientNodeVersion; + this.minNodeVersion = minNodeVersion; + this.maxNodeVersion = maxNodeVersion; + } + + @Override + public Iterator iterator() { + return nodes.valuesIt(); + } + + /** + * Returns {@code true} if the local node is the elected cluster-manager node. + */ + public boolean isLocalNodeElectedClusterManager() { + if (localNodeId == null) { + // we don't know yet the local node id, return false + return false; + } + return localNodeId.equals(clusterManagerNodeId); + } + + /** + * Returns {@code true} if the local node is the elected cluster-manager node. 
+ * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isLocalNodeElectedClusterManager()} + */ + @Deprecated + public boolean isLocalNodeElectedMaster() { + return isLocalNodeElectedClusterManager(); + } + + /** + * Get the number of known nodes + * + * @return number of nodes + */ + public int getSize() { + return nodes.size(); + } + + /** + * Get a {@link Map} of the discovered nodes arranged by their ids + * + * @return {@link Map} of the discovered nodes arranged by their ids + */ + public ImmutableOpenMap getNodes() { + return this.nodes; + } + + /** + * Get a {@link Map} of the discovered data nodes arranged by their ids + * + * @return {@link Map} of the discovered data nodes arranged by their ids + */ + public ImmutableOpenMap getDataNodes() { + return this.dataNodes; + } + + /** + * Get a {@link Map} of the discovered cluster-manager nodes arranged by their ids + * + * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids + */ + public ImmutableOpenMap getClusterManagerNodes() { + return this.clusterManagerNodes; + } + + /** + * Get a {@link Map} of the discovered cluster-manager nodes arranged by their ids + * + * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodes()} + */ + @Deprecated + public ImmutableOpenMap getMasterNodes() { + return getClusterManagerNodes(); + } + + /** + * @return All the ingest nodes arranged by their ids + */ + public ImmutableOpenMap getIngestNodes() { + return ingestNodes; + } + + /** + * Get a {@link Map} of the discovered cluster-manager and data nodes arranged by their ids + * + * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids + */ + public ImmutableOpenMap getClusterManagerAndDataNodes() { + ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(dataNodes); + nodes.putAll(clusterManagerNodes); + return nodes.build(); + } + + /** + * Get a {@link Map} of the discovered cluster-manager and data nodes arranged by their ids + * + * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerAndDataNodes()} + */ + @Deprecated + public ImmutableOpenMap getMasterAndDataNodes() { + return getClusterManagerAndDataNodes(); + } + + /** + * Get a {@link Map} of the coordinating only nodes (nodes which are neither cluster-manager, nor data, nor ingest nodes) arranged by their ids + * + * @return {@link Map} of the coordinating only nodes arranged by their ids + */ + public ImmutableOpenMap getCoordinatingOnlyNodes() { + ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(this.nodes); + nodes.removeAll(clusterManagerNodes.keys()); + nodes.removeAll(dataNodes.keys()); + nodes.removeAll(ingestNodes.keys()); + return nodes.build(); + } + + /** + * Returns a stream of all nodes, with cluster-manager nodes at the front + */ + public Stream clusterManagersFirstStream() { + return Stream.concat( + StreamSupport.stream(clusterManagerNodes.spliterator(), false).map(cur -> cur.value), + StreamSupport.stream(this.spliterator(), false).filter(n -> n.isClusterManagerNode() == false) + ); + } + + /** + * Returns a stream of all nodes, with cluster-manager nodes at the front + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagersFirstStream()} 
+ */ + @Deprecated + public Stream mastersFirstStream() { + return clusterManagersFirstStream(); + } + + /** + * Get a node by its id + * + * @param nodeId id of the wanted node + * @return wanted node if it exists. Otherwise null + */ + public ProtobufDiscoveryNode get(String nodeId) { + return nodes.get(nodeId); + } + + /** + * Determine if a given node id exists + * + * @param nodeId id of the node which existence should be verified + * @return true if the node exists. Otherwise false + */ + public boolean nodeExists(String nodeId) { + return nodes.containsKey(nodeId); + } + + /** + * Determine if a given node exists + * + * @param node of the node which existence should be verified + * @return true if the node exists. Otherwise false + */ + public boolean nodeExists(ProtobufDiscoveryNode node) { + ProtobufDiscoveryNode existing = nodes.get(node.getId()); + return existing != null && existing.equals(node); + } + + /** + * Determine if the given node exists and has the right roles. Supported roles vary by version, and our local cluster state might + * have come via an older cluster-manager, so the roles may differ even if the node is otherwise identical. + */ + public boolean nodeExistsWithSameRoles(ProtobufDiscoveryNode discoveryNode) { + final ProtobufDiscoveryNode existing = nodes.get(discoveryNode.getId()); + return existing != null && existing.equals(discoveryNode) && existing.getRoles().equals(discoveryNode.getRoles()); + } + + /** + * Get the id of the cluster-manager node + * + * @return id of the cluster-manager + */ + public String getClusterManagerNodeId() { + return this.clusterManagerNodeId; + } + + /** + * Get the id of the cluster-manager node + * + * @return id of the cluster-manager + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeId()} + */ + @Deprecated + public String getMasterNodeId() { + return getClusterManagerNodeId(); + } + + /** + * Get the id of the local node + * + * @return id of the local node + */ + public String getLocalNodeId() { + return this.localNodeId; + } + + /** + * Get the local node + * + * @return local node + */ + public ProtobufDiscoveryNode getLocalNode() { + return nodes.get(localNodeId); + } + + /** + * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node + */ + @Nullable + public ProtobufDiscoveryNode getClusterManagerNode() { + if (clusterManagerNodeId != null) { + return nodes.get(clusterManagerNodeId); + } + return null; + } + + /** + * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNode()} + */ + @Deprecated + @Nullable + public ProtobufDiscoveryNode getMasterNode() { + return getClusterManagerNode(); + } + + /** + * Get a node by its address + * + * @param address {@link TransportAddress} of the wanted node + * @return node identified by the given address or null if no such node exists + */ + public ProtobufDiscoveryNode findByAddress(TransportAddress address) { + for (ObjectCursor cursor : nodes.values()) { + ProtobufDiscoveryNode node = cursor.value; + if (node.getAddress().equals(address)) { + return node; + } + } + return null; + } + + /** + * Returns the version of the node with the oldest version in the cluster that is not a client node + * + * If there are no non-client nodes, Version.CURRENT will be returned. 
+ * + * @return the oldest version in the cluster + */ + public Version getSmallestNonClientNodeVersion() { + return minNonClientNodeVersion; + } + + /** + * Returns the version of the node with the youngest version in the cluster that is not a client node. + * + * If there are no non-client nodes, Version.CURRENT will be returned. + * + * @return the youngest version in the cluster + */ + public Version getLargestNonClientNodeVersion() { + return maxNonClientNodeVersion; + } + + /** + * Returns the version of the node with the oldest version in the cluster. + * + * @return the oldest version in the cluster + */ + public Version getMinNodeVersion() { + return minNodeVersion; + } + + /** + * Returns the version of the node with the youngest version in the cluster + * + * @return the youngest version in the cluster + */ + public Version getMaxNodeVersion() { + return maxNodeVersion; + } + + /** + * Resolve a node with a given id + * + * @param node id of the node to discover + * @return discovered node matching the given id + * @throws IllegalArgumentException if more than one node matches the request or no nodes have been resolved + */ + public ProtobufDiscoveryNode resolveNode(String node) { + String[] resolvedNodeIds = resolveNodes(node); + if (resolvedNodeIds.length > 1) { + throw new IllegalArgumentException( + "resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node" + ); + } + if (resolvedNodeIds.length == 0) { + throw new IllegalArgumentException("failed to resolve [" + node + "], no matching nodes"); + } + return nodes.get(resolvedNodeIds[0]); + } + + /** + * Resolves a set of nodes according to the given sequence of node specifications. Implements the logic in various APIs that allow the + * user to run the action on a subset of the nodes in the cluster. See [Node specification] in the reference manual for full details. + * + * Works by tracking the current set of nodes and applying each node specification in sequence. The set starts out empty and each node + * specification may either add or remove nodes. For instance: + * + * - _local, _cluster_manager (_master) and _all respectively add to the subset the local node, the currently-elected cluster_manager, and all the nodes + * - node IDs, names, hostnames and IP addresses all add to the subset any nodes which match + * - a wildcard-based pattern of the form "attr*:value*" adds to the subset all nodes with a matching attribute with a matching value + * - role:true adds to the subset all nodes with a matching role + * - role:false removes from the subset all nodes with a matching role. + * + * An empty sequence of node specifications returns all nodes, since the corresponding actions run on all nodes by default. + */ + public String[] resolveNodes(String... 
nodes) { + if (nodes == null || nodes.length == 0) { + return StreamSupport.stream(this.spliterator(), false).map(ProtobufDiscoveryNode::getId).toArray(String[]::new); + } else { + ObjectHashSet resolvedNodesIds = new ObjectHashSet<>(nodes.length); + for (String nodeId : nodes) { + if (nodeId == null) { + // don't silence the underlying issue, it is a bug, so lets fail if assertions are enabled + assert nodeId != null : "nodeId should not be null"; + continue; + } else if (nodeId.equals("_local")) { + String localNodeId = getLocalNodeId(); + if (localNodeId != null) { + resolvedNodesIds.add(localNodeId); + } + } else if (nodeId.equals("_master") || nodeId.equals("_cluster_manager")) { + String clusterManagerNodeId = getClusterManagerNodeId(); + if (clusterManagerNodeId != null) { + resolvedNodesIds.add(clusterManagerNodeId); + } + } else if (nodeExists(nodeId)) { + resolvedNodesIds.add(nodeId); + } else { + for (ProtobufDiscoveryNode node : this) { + if ("_all".equals(nodeId) + || Regex.simpleMatch(nodeId, node.getName()) + || Regex.simpleMatch(nodeId, node.getHostAddress()) + || Regex.simpleMatch(nodeId, node.getHostName())) { + resolvedNodesIds.add(node.getId()); + } + } + int index = nodeId.indexOf(':'); + if (index != -1) { + String matchAttrName = nodeId.substring(0, index); + String matchAttrValue = nodeId.substring(index + 1); + if (DiscoveryNodeRole.DATA_ROLE.roleName().equals(matchAttrName)) { + if (Booleans.parseBoolean(matchAttrValue, true)) { + resolvedNodesIds.addAll(dataNodes.keys()); + } else { + resolvedNodesIds.removeAll(dataNodes.keys()); + } + } else if (roleNameIsClusterManager(matchAttrName)) { + if (Booleans.parseBoolean(matchAttrValue, true)) { + resolvedNodesIds.addAll(clusterManagerNodes.keys()); + } else { + resolvedNodesIds.removeAll(clusterManagerNodes.keys()); + } + } else if (DiscoveryNodeRole.INGEST_ROLE.roleName().equals(matchAttrName)) { + if (Booleans.parseBoolean(matchAttrValue, true)) { + resolvedNodesIds.addAll(ingestNodes.keys()); + } else { + resolvedNodesIds.removeAll(ingestNodes.keys()); + } + } else if (ProtobufDiscoveryNode.COORDINATING_ONLY.equals(matchAttrName)) { + if (Booleans.parseBoolean(matchAttrValue, true)) { + resolvedNodesIds.addAll(getCoordinatingOnlyNodes().keys()); + } else { + resolvedNodesIds.removeAll(getCoordinatingOnlyNodes().keys()); + } + } else { + for (ProtobufDiscoveryNode node : this) { + for (DiscoveryNodeRole role : Sets.difference(node.getRoles(), DiscoveryNodeRole.BUILT_IN_ROLES)) { + if (role.roleName().equals(matchAttrName)) { + if (Booleans.parseBoolean(matchAttrValue, true)) { + resolvedNodesIds.add(node.getId()); + } else { + resolvedNodesIds.remove(node.getId()); + } + } + } + } + for (ProtobufDiscoveryNode node : this) { + for (Map.Entry entry : node.getAttributes().entrySet()) { + String attrName = entry.getKey(); + String attrValue = entry.getValue(); + if (Regex.simpleMatch(matchAttrName, attrName) && Regex.simpleMatch(matchAttrValue, attrValue)) { + resolvedNodesIds.add(node.getId()); + } + } + } + } + } + } + } + return resolvedNodesIds.toArray(String.class); + } + } + + public ProtobufDiscoveryNodes newNode(ProtobufDiscoveryNode node) { + return new Builder(this).add(node).build(); + } + + /** + * Returns the changes comparing this nodes to the provided nodes. 
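+     *
+     * For example (a sketch; {@code previousNodes} and {@code currentNodes} are illustrative locals):
+     * <pre><code>
+     * ProtobufDiscoveryNodes.Delta delta = currentNodes.delta(previousNodes);
+     * if (delta.hasChanges()) {
+     *     String summary = delta.shortSummary();
+     * }
+     * </code></pre>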
+ */ + public Delta delta(ProtobufDiscoveryNodes other) { + final List removed = new ArrayList<>(); + final List added = new ArrayList<>(); + for (ProtobufDiscoveryNode node : other) { + if (this.nodeExists(node) == false) { + removed.add(node); + } + } + for (ProtobufDiscoveryNode node : this) { + if (other.nodeExists(node) == false) { + added.add(node); + } + } + + return new Delta( + other.getClusterManagerNode(), + getClusterManagerNode(), + localNodeId, + Collections.unmodifiableList(removed), + Collections.unmodifiableList(added) + ); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("nodes: \n"); + for (ProtobufDiscoveryNode node : this) { + sb.append(" ").append(node); + if (node == getLocalNode()) { + sb.append(", local"); + } + if (node == getClusterManagerNode()) { + sb.append(", cluster-manager"); + } + sb.append("\n"); + } + return sb.toString(); + } + + /** + * Delta between nodes. + * + * @opensearch.internal + */ + public static class Delta { + + private final String localNodeId; + @Nullable + private final ProtobufDiscoveryNode previousClusterManagerNode; + @Nullable + private final ProtobufDiscoveryNode newClusterManagerNode; + private final List removed; + private final List added; + + private Delta( + @Nullable ProtobufDiscoveryNode previousClusterManagerNode, + @Nullable ProtobufDiscoveryNode newClusterManagerNode, + String localNodeId, + List removed, + List added + ) { + this.previousClusterManagerNode = previousClusterManagerNode; + this.newClusterManagerNode = newClusterManagerNode; + this.localNodeId = localNodeId; + this.removed = removed; + this.added = added; + } + + public boolean hasChanges() { + return clusterManagerNodeChanged() || !removed.isEmpty() || !added.isEmpty(); + } + + public boolean clusterManagerNodeChanged() { + return Objects.equals(newClusterManagerNode, previousClusterManagerNode) == false; + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeChanged()} */ + @Deprecated + public boolean masterNodeChanged() { + return clusterManagerNodeChanged(); + } + + @Nullable + public ProtobufDiscoveryNode previousClusterManagerNode() { + return previousClusterManagerNode; + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #previousClusterManagerNode()} */ + @Deprecated + @Nullable + public ProtobufDiscoveryNode previousMasterNode() { + return previousClusterManagerNode(); + } + + @Nullable + public ProtobufDiscoveryNode newClusterManagerNode() { + return newClusterManagerNode; + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #newClusterManagerNode()} */ + @Deprecated + @Nullable + public ProtobufDiscoveryNode newMasterNode() { + return newClusterManagerNode(); + } + + public boolean removed() { + return !removed.isEmpty(); + } + + public List removedNodes() { + return removed; + } + + public boolean added() { + return !added.isEmpty(); + } + + public List addedNodes() { + return added; + } + + public String shortSummary() { + final StringBuilder summary = new StringBuilder(); + if (clusterManagerNodeChanged()) { + summary.append("cluster-manager node changed {previous ["); + if (previousClusterManagerNode() != null) { + summary.append(previousClusterManagerNode()); + } + summary.append("], current ["); + if (newClusterManagerNode() != null) { + summary.append(newClusterManagerNode()); + } + summary.append("]}"); + } + if (removed()) { + if (summary.length() > 
0) { + summary.append(", "); + } + summary.append("removed {").append(Strings.collectionToCommaDelimitedString(removedNodes())).append('}'); + } + if (added()) { + final String addedNodesExceptLocalNode = addedNodes().stream() + .filter(node -> node.getId().equals(localNodeId) == false) + .map(ProtobufDiscoveryNode::toString) + .collect(Collectors.joining(",")); + if (addedNodesExceptLocalNode.length() > 0) { + // ignore ourselves when reporting on nodes being added + if (summary.length() > 0) { + summary.append(", "); + } + summary.append("added {").append(addedNodesExceptLocalNode).append('}'); + } + } + return summary.toString(); + } + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + if (clusterManagerNodeId == null) { + out.writeBoolNoTag(false); + } else { + out.writeBoolNoTag(true); + out.writeStringNoTag(clusterManagerNodeId); + } + out.writeInt32NoTag(nodes.size()); + for (ProtobufDiscoveryNode node : this) { + node.writeTo(out); + } + } + + public static ProtobufDiscoveryNodes readFrom(CodedInputStream in, ProtobufDiscoveryNode localNode) throws IOException { + Builder builder = new Builder(); + if (in.readBool()) { + builder.clusterManagerNodeId(in.readString()); + } + if (localNode != null) { + builder.localNodeId(localNode.getId()); + } + int size = in.readInt32(); + for (int i = 0; i < size; i++) { + ProtobufDiscoveryNode node = new ProtobufDiscoveryNode(in); + if (localNode != null && node.getId().equals(localNode.getId())) { + // reuse the same instance of our address and local node id for faster equality + node = localNode; + } + // some one already built this and validated it's OK, skip the n2 scans + assert builder.validateAdd(node) == null : "building disco nodes from network doesn't pass preflight: " + + builder.validateAdd(node); + builder.putUnsafe(node); + } + return builder.build(); + } + + public static Diff readDiffFrom(CodedInputStream in, ProtobufDiscoveryNode localNode) throws IOException { + return AbstractDiffable.readDiffFrom(in1 -> readFrom(in1, localNode), in); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ProtobufDiscoveryNodes nodes) { + return new Builder(nodes); + } + + /** + * Builder of a map of discovery nodes. + * + * @opensearch.internal + */ + public static class Builder { + + private final ImmutableOpenMap.Builder nodes; + private String clusterManagerNodeId; + private String localNodeId; + + public Builder() { + nodes = ImmutableOpenMap.builder(); + } + + public Builder(ProtobufDiscoveryNodes nodes) { + this.clusterManagerNodeId = nodes.getClusterManagerNodeId(); + this.localNodeId = nodes.getLocalNodeId(); + this.nodes = ImmutableOpenMap.builder(nodes.getNodes()); + } + + /** + * adds a disco node to the builder. Will throw an {@link IllegalArgumentException} if + * the supplied node doesn't pass the pre-flight checks performed by {@link #validateAdd(ProtobufDiscoveryNode)} + */ + public Builder add(ProtobufDiscoveryNode node) { + final String preflight = validateAdd(node); + if (preflight != null) { + throw new IllegalArgumentException(preflight); + } + putUnsafe(node); + return this; + } + + /** + * Get a node by its id + * + * @param nodeId id of the wanted node + * @return wanted node if it exists. 
Otherwise null + */ + @Nullable + public ProtobufDiscoveryNode get(String nodeId) { + return nodes.get(nodeId); + } + + private void putUnsafe(ProtobufDiscoveryNode node) { + nodes.put(node.getId(), node); + } + + public Builder remove(String nodeId) { + nodes.remove(nodeId); + return this; + } + + public Builder remove(ProtobufDiscoveryNode node) { + if (node.equals(nodes.get(node.getId()))) { + nodes.remove(node.getId()); + } + return this; + } + + public Builder clusterManagerNodeId(String clusterManagerNodeId) { + this.clusterManagerNodeId = clusterManagerNodeId; + return this; + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeId} */ + @Deprecated + public Builder masterNodeId(String clusterManagerNodeId) { + return clusterManagerNodeId(clusterManagerNodeId); + } + + public Builder localNodeId(String localNodeId) { + this.localNodeId = localNodeId; + return this; + } + + /** + * Checks that a node can be safely added to this node collection. + * + * @return null if all is OK or an error message explaining why a node can not be added. + * + * Note: if this method returns a non-null value, calling {@link #add(ProtobufDiscoveryNode)} will fail with an + * exception + */ + private String validateAdd(ProtobufDiscoveryNode node) { + for (ObjectCursor cursor : nodes.values()) { + final ProtobufDiscoveryNode existingNode = cursor.value; + if (node.getAddress().equals(existingNode.getAddress()) && node.getId().equals(existingNode.getId()) == false) { + return "can't add node " + node + ", found existing node " + existingNode + " with same address"; + } + if (node.getId().equals(existingNode.getId()) && node.equals(existingNode) == false) { + return "can't add node " + + node + + ", found existing node " + + existingNode + + " with the same id but is a different node instance"; + } + } + return null; + } + + public ProtobufDiscoveryNodes build() { + ImmutableOpenMap.Builder dataNodesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder clusterManagerNodesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder ingestNodesBuilder = ImmutableOpenMap.builder(); + Version minNodeVersion = null; + Version maxNodeVersion = null; + Version minNonClientNodeVersion = null; + Version maxNonClientNodeVersion = null; + for (ObjectObjectCursor nodeEntry : nodes) { + if (nodeEntry.value.isDataNode()) { + dataNodesBuilder.put(nodeEntry.key, nodeEntry.value); + } + if (nodeEntry.value.isClusterManagerNode()) { + clusterManagerNodesBuilder.put(nodeEntry.key, nodeEntry.value); + } + final Version version = nodeEntry.value.getVersion(); + if (nodeEntry.value.isDataNode() || nodeEntry.value.isClusterManagerNode()) { + if (minNonClientNodeVersion == null) { + minNonClientNodeVersion = version; + maxNonClientNodeVersion = version; + } else { + minNonClientNodeVersion = Version.min(minNonClientNodeVersion, version); + maxNonClientNodeVersion = Version.max(maxNonClientNodeVersion, version); + } + } + if (nodeEntry.value.isIngestNode()) { + ingestNodesBuilder.put(nodeEntry.key, nodeEntry.value); + } + minNodeVersion = minNodeVersion == null ? version : Version.min(minNodeVersion, version); + maxNodeVersion = maxNodeVersion == null ? version : Version.max(maxNodeVersion, version); + } + + return new ProtobufDiscoveryNodes( + nodes.build(), + dataNodesBuilder.build(), + clusterManagerNodesBuilder.build(), + ingestNodesBuilder.build(), + clusterManagerNodeId, + localNodeId, + minNonClientNodeVersion == null ? 
Version.CURRENT : minNonClientNodeVersion, + maxNonClientNodeVersion == null ? Version.CURRENT : maxNonClientNodeVersion, + maxNodeVersion == null ? Version.CURRENT : maxNodeVersion, + minNodeVersion == null ? Version.CURRENT : minNodeVersion + ); + } + + public boolean isLocalNodeElectedClusterManager() { + return clusterManagerNodeId != null && clusterManagerNodeId.equals(localNodeId); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isLocalNodeElectedClusterManager()} */ + @Deprecated + public boolean isLocalNodeElectedMaster() { + return isLocalNodeElectedClusterManager(); + } + } + + /** + * Check if the given name of the node role is 'cluster_manager' or 'master'. + * The method is added for {@link #resolveNodes} to keep the code clear, when support the both above roles. + * @deprecated As of 2.0, because promoting inclusive language. MASTER_ROLE is deprecated. + * @param matchAttrName a given String for a name of the node role. + * @return true if the given roleName is 'cluster_manger' or 'master' + */ + @Deprecated + private boolean roleNameIsClusterManager(String matchAttrName) { + return DiscoveryNodeRole.MASTER_ROLE.roleName().equals(matchAttrName) + || DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName().equals(matchAttrName); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 5402218664f6f..5b2ef369a930c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -36,6 +36,7 @@ import org.apache.lucene.util.CollectionUtil; import org.opensearch.core.Assertions; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ProtobufClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -161,6 +162,64 @@ public RoutingNodes(ClusterState clusterState, boolean readOnly) { } } + public RoutingNodes(ProtobufClusterState clusterState) { + this(clusterState, true); + } + + public RoutingNodes(ProtobufClusterState clusterState, boolean readOnly) { + this.readOnly = readOnly; + final RoutingTable routingTable = clusterState.routingTable(); + + // fill in the nodeToShards with the "live" nodes + for (ObjectCursor cursor : clusterState.nodes().getDataNodes().values()) { + String nodeId = cursor.value.getId(); + this.nodesToShards.put(cursor.value.getId(), new RoutingNode(nodeId, clusterState.nodes().get(nodeId))); + } + + // fill in the inverse of node -> shards allocated + // also fill replicaSet information + for (ObjectCursor indexRoutingTable : routingTable.indicesRouting().values()) { + for (IndexShardRoutingTable indexShard : indexRoutingTable.value) { + assert indexShard.primary != null; + for (ShardRouting shard : indexShard) { + // to get all the shards belonging to an index, including the replicas, + // we define a replica set and keep track of it. A replica set is identified + // by the ShardId, as this is common for primary and replicas. + // A replica Set might have one (and not more) replicas with the state of RELOCATING. 
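+                    // Each assigned shard is indexed under its current node; for a relocating shard the
+                    // target copy is additionally tracked on the destination node, while unassigned shards
+                    // are collected separately below.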
+ if (shard.assignedToNode()) { + RoutingNode routingNode = this.nodesToShards.computeIfAbsent( + shard.currentNodeId(), + k -> new RoutingNode(shard.currentNodeId(), clusterState.nodes().get(shard.currentNodeId())) + ); + routingNode.add(shard); + assignedShardsAdd(shard); + if (shard.relocating()) { + relocatingShards++; + // Add the counterpart shard with relocatingNodeId reflecting the source from which + // it's relocating from. + routingNode = nodesToShards.computeIfAbsent( + shard.relocatingNodeId(), + k -> new RoutingNode(shard.relocatingNodeId(), clusterState.nodes().get(shard.relocatingNodeId())) + ); + ShardRouting targetShardRouting = shard.getTargetRelocatingShard(); + addInitialRecovery(targetShardRouting, indexShard.primary); + routingNode.add(targetShardRouting); + assignedShardsAdd(targetShardRouting); + } else if (shard.initializing()) { + if (shard.primary()) { + inactivePrimaryCount++; + } + inactiveShardCount++; + addInitialRecovery(shard, indexShard.primary); + } + } else { + unassignedShards.add(shard); + } + } + } + } + } + private void addRecovery(ShardRouting routing) { updateRecoveryCounts(routing, true, findAssignedPrimaryIfPeerRecovery(routing)); } diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufNamedWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufNamedWriteable.java new file mode 100644 index 0000000000000..174f99d7e7f07 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufNamedWriteable.java @@ -0,0 +1,29 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.common.io.stream; + +/** + * A {@link Writeable} object identified by its name. +* To be used for arbitrary serializable objects (e.g. queries); when reading them, their name tells +* which specific object needs to be created. +* +* @opensearch.internal +*/ +public interface ProtobufNamedWriteable extends ProtobufWriteable { + + /** + * Returns the name of the writeable object + */ + String getWriteableName(); +} diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java index 843d6755e42d7..54c7ec6adbc57 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java @@ -11,17 +11,25 @@ import com.google.protobuf.CodedInputStream; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.IntFunction; import org.apache.lucene.util.ArrayUtil; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.unit.TimeValue; import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; /** * A class for additional methods to read from a {@link CodedInputStream}. 
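+ * <p>
+ * A rough usage sketch (the reads shown must mirror how the stream was written):
+ * <pre><code>
+ * ProtobufStreamInput psi = new ProtobufStreamInput();
+ * TimeValue timeout = psi.readTimeValue(in);   // "in" is the CodedInputStream
+ * String[] names = psi.readStringArray(in);
+ * </code></pre>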
@@ -30,6 +38,8 @@ public class ProtobufStreamInput {
 
     private Version version = Version.CURRENT;
 
+    private static final TimeUnit[] TIME_UNITS = TimeUnit.values();
+
     /**
      * The version of the node on the other side of this stream.
      */
@@ -52,6 +62,24 @@ public String readOptionalString(CodedInputStream in) throws IOException {
         return null;
     }
 
+    @Nullable
+    public Long readOptionalLong(CodedInputStream in) throws IOException {
+        if (readBoolean(in)) {
+            return in.readInt64();
+        }
+        return null;
+    }
+
+    @Nullable
+    public final Boolean readOptionalBoolean(CodedInputStream in) throws IOException {
+        final byte value = in.readRawByte();
+        // 0 and 1 encode false/true; 2 marks a null Boolean
+        if (value == 2) {
+            return null;
+        } else {
+            return readBoolean(value);
+        }
+    }
+
     /**
      * If the returned map contains any entries it will be mutable. If it is empty it might be immutable.
      */
@@ -162,6 +190,51 @@ public BytesReference readBytesReference(int length, CodedInputStream in) throws
         return new BytesArray(bytes, 0, length);
     }
 
+    /**
+     * Read a {@link TimeValue} from the stream
+     */
+    public TimeValue readTimeValue(CodedInputStream in) throws IOException {
+        long duration = in.readInt64();
+        TimeUnit timeUnit = TIME_UNITS[in.readRawByte()];
+        return new TimeValue(duration, timeUnit);
+    }
+
+    public String[] readStringArray(CodedInputStream in) throws IOException {
+        int size = readArraySize(in);
+        if (size == 0) {
+            return Strings.EMPTY_ARRAY;
+        }
+        String[] ret = new String[size];
+        for (int i = 0; i < size; i++) {
+            ret[i] = in.readString();
+        }
+        return ret;
+    }
+
+    private <E extends Enum<E>> E readEnum(Class<E> enumClass, E[] values, CodedInputStream in) throws IOException {
+        int ordinal = readVInt(in);
+        if (ordinal < 0 || ordinal >= values.length) {
+            throw new IOException("Unknown " + enumClass.getSimpleName() + " ordinal [" + ordinal + "]");
+        }
+        return values[ordinal];
+    }
+
+    /**
+     * Reads an {@link EnumSet} with type E whose elements were serialized based on their ordinal values
+     */
+    public <E extends Enum<E>> EnumSet<E> readEnumSet(Class<E> enumClass, CodedInputStream in) throws IOException {
+        int size = readVInt(in);
+        final EnumSet<E> res = EnumSet.noneOf(enumClass);
+        if (size == 0) {
+            return res;
+        }
+        final E[] values = enumClass.getEnumConstants();
+        for (int i = 0; i < size; i++) {
+            res.add(readEnum(enumClass, values, in));
+        }
+        return res;
+    }
+
     private boolean readBoolean(final byte value) {
         if (value == 0) {
             return false;
@@ -172,4 +245,86 @@ private boolean readBoolean(final byte value) {
         } else if (value == 1) {
             return true;
         } else {
             final String message = String.format(Locale.ROOT, "unexpected byte [0x%02x]", value);
             throw new IllegalStateException(message);
         }
     }
+
+    /**
+     * Reads a list of objects. The list is expected to have been written using {@code ProtobufStreamOutput#writeCollection}.
+     * If the returned list contains any entries it will be mutable. If it is empty it might be immutable.
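+     * A minimal usage sketch (assuming a {@code ProtobufStreamInput} named {@code psi} and a stream
+     * positioned at a list of strings):
+     * <pre>
+     * List&lt;String&gt; names = psi.readList(CodedInputStream::readString, in);
+     * </pre>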
+     *
+     * @return the list of objects
+     * @throws IOException if an I/O exception occurs reading the list
+     */
+    public <T> List<T> readList(final ProtobufWriteable.Reader<T> reader, CodedInputStream in) throws IOException {
+        return readCollection(reader, ArrayList::new, Collections.emptyList(), in);
+    }
+
+    /**
+     * Reads a collection of objects
+     */
+    private <T, C extends Collection<? super T>> C readCollection(
+        ProtobufWriteable.Reader<T> reader,
+        IntFunction<C> constructor,
+        C empty,
+        CodedInputStream in
+    ) throws IOException {
+        int count = readArraySize(in);
+        if (count == 0) {
+            return empty;
+        }
+        C builder = constructor.apply(count);
+        for (int i = 0; i < count; i++) {
+            builder.add(reader.read(in));
+        }
+        return builder;
+    }
+
+    /**
+     * Reads a {@link ProtobufNamedWriteable} from the current stream, by first reading its name and then looking for
+     * the corresponding entry in the registry by name, so that the proper object can be read and returned.
+     * Default implementation throws {@link UnsupportedOperationException} as ProtobufStreamInput doesn't hold a registry.
+     */
+    @Nullable
+    public <C extends ProtobufNamedWriteable> C readNamedWriteable(@SuppressWarnings("unused") Class<C> categoryClass) throws IOException {
+        throw new UnsupportedOperationException("can't read named writeable from ProtobufStreamInput");
+    }
+
+    /**
+     * Reads a {@link ProtobufNamedWriteable} from the current stream with the given name. It is assumed that the caller obtained the name
+     * from another source, so it's not read from the stream. The name is used for looking for
+     * the corresponding entry in the registry by name, so that the proper object can be read and returned.
+     * Default implementation throws {@link UnsupportedOperationException} as ProtobufStreamInput doesn't hold a registry.
+     */
+    @Nullable
+    public <C extends ProtobufNamedWriteable> C readNamedWriteable(
+        @SuppressWarnings("unused") Class<C> categoryClass,
+        @SuppressWarnings("unused") String name
+    ) throws IOException {
+        throw new UnsupportedOperationException("can't read named writeable from ProtobufStreamInput");
+    }
+
+    public <T> T[] readOptionalArray(ProtobufWriteable.Reader<T> reader, IntFunction<T[]> arraySupplier, CodedInputStream in)
+        throws IOException {
+        return readBoolean(in) ? readArray(reader, arraySupplier, in) : null;
+    }
+
+    public <T> T[] readArray(final ProtobufWriteable.Reader<T> reader, final IntFunction<T[]> arraySupplier, CodedInputStream in)
+        throws IOException {
+        final int length = readArraySize(in);
+        final T[] values = arraySupplier.apply(length);
+        for (int i = 0; i < length; i++) {
+            values[i] = reader.read(in);
+        }
+        return values;
+    }
+
+    /**
+     * Read an optional {@link TimeValue} from the stream, returning null if no TimeValue was written.
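+     * The value is expected to have been written with the matching
+     * {@code ProtobufStreamOutput#writeOptionalTimeValue}, i.e. a boolean presence flag followed by the value.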
+     */
+    public @Nullable TimeValue readOptionalTimeValue(CodedInputStream in) throws IOException {
+        if (readBoolean(in)) {
+            return readTimeValue(in);
+        } else {
+            return null;
+        }
+    }
+
 }
diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java
index b6082159d024a..5c6f7106938b8 100644
--- a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java
+++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamOutput.java
@@ -11,10 +11,14 @@
 import com.google.protobuf.CodedOutputStream;
 
 import java.io.IOException;
+import java.util.Collection;
+import java.util.EnumSet;
 import java.util.Map;
 
 import org.opensearch.Version;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.io.stream.ProtobufWriteable.Writer;
+import org.opensearch.common.unit.TimeValue;
 
 /**
  * A class for additional methods to write to a {@link CodedOutputStream}.
@@ -68,4 +72,120 @@ public void writeOptionalWriteable(@Nullable ProtobufWriteable writeable, CodedOutputStream
         }
     }
 
+    /**
+     * Write a {@link TimeValue} to the stream
+     */
+    public void writeTimeValue(TimeValue timeValue, CodedOutputStream out) throws IOException {
+        out.writeInt64NoTag(timeValue.duration());
+        out.writeRawByte((byte) timeValue.timeUnit().ordinal());
+    }
+
+    /**
+     * Writes an {@link EnumSet} with type E, serializing each element based on its ordinal value
+     */
+    public <E extends Enum<E>> void writeEnumSet(EnumSet<E> enumSet, CodedOutputStream out) throws IOException {
+        out.writeInt32NoTag(enumSet.size());
+        for (E e : enumSet) {
+            out.writeEnumNoTag(e.ordinal());
+        }
+    }
+
+    public void writeOptionalLong(@Nullable Long l, CodedOutputStream out) throws IOException {
+        if (l == null) {
+            out.writeBoolNoTag(false);
+        } else {
+            out.writeBoolNoTag(true);
+            out.writeInt64NoTag(l);
+        }
+    }
+
+    public void writeOptionalString(@Nullable String str, CodedOutputStream out) throws IOException {
+        if (str == null) {
+            out.writeBoolNoTag(false);
+        } else {
+            out.writeBoolNoTag(true);
+            out.writeStringNoTag(str);
+        }
+    }
+
+    public void writeOptionalBoolean(@Nullable Boolean b, CodedOutputStream out) throws IOException {
+        // 2 marks a null Boolean; 0 and 1 are the regular false/true encoding
+        byte two = 2;
+        if (b == null) {
+            out.write(two);
+        } else {
+            out.writeBoolNoTag(b);
+        }
+    }
+
+    /**
+     * Writes a collection of objects via a {@link Writer}.
+     *
+     * @param collection the collection of objects
+     * @throws IOException if an I/O exception occurs writing the collection
+     */
+    public <T> void writeCollection(final Collection<T> collection, final Writer<T> writer, CodedOutputStream out) throws IOException {
+        out.writeInt32NoTag(collection.size());
+        for (final T val : collection) {
+            writer.write(out, val);
+        }
+    }
+
+    public void writeStringArray(String[] array, CodedOutputStream out) throws IOException {
+        out.writeInt32NoTag(array.length);
+        for (String s : array) {
+            out.writeStringNoTag(s);
+        }
+    }
+
+    /**
+     * Writes a {@link ProtobufNamedWriteable} to the current stream, by first writing its name and then the object itself
+     */
+    public void writeNamedWriteable(ProtobufNamedWriteable namedWriteable, CodedOutputStream out) throws IOException {
+        out.writeStringNoTag(namedWriteable.getWriteableName());
+        namedWriteable.writeTo(out);
+    }
+
+    /**
+     * Writes a string array; a null array is written as a zero length array.
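+     * Note that with this encoding a null array is indistinguishable from an empty one when read back.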
+ */ + public void writeStringArrayNullable(@Nullable String[] array, CodedOutputStream out) throws IOException { + if (array == null) { + out.writeInt32NoTag(0); + } else { + writeStringArray(array, out); + } + } + + /** + * Write an optional {@link TimeValue} to the stream. + */ + public void writeOptionalTimeValue(@Nullable TimeValue timeValue, CodedOutputStream out) throws IOException { + if (timeValue == null) { + out.writeBoolNoTag(false); + } else { + out.writeBoolNoTag(true); + writeTimeValue(timeValue, out); + } + } + + /** + * Same as {@link #writeArray(Writer, Object[], CodedOutputStream)} but the provided array may be null. An additional boolean value is + * serialized to indicate whether the array was null or not. + */ + public void writeOptionalArray(final Writer writer, final @Nullable T[] array, CodedOutputStream out) throws IOException { + if (array == null) { + out.writeBoolNoTag(false); + } else { + out.writeBoolNoTag(true); + writeArray(writer, array, out); + } + } + + public void writeArray(final Writer writer, final T[] array, CodedOutputStream out) throws IOException { + out.writeInt32NoTag(array.length); + for (T value : array) { + writer.write(out, value); + } + } + } diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufVersionedNamedWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufVersionedNamedWriteable.java new file mode 100644 index 0000000000000..5e888abc9b0aa --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufVersionedNamedWriteable.java @@ -0,0 +1,34 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.common.io.stream; + +import org.opensearch.Version; + +/** + * A {@link NamedWriteable} that has a minimum version associated with it. 
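+* A minimal implementing sketch (the name and version below are illustrative only):
+* <pre>
+* public class TermQuery implements ProtobufVersionedNamedWriteable {
+*     public String getWriteableName() { return "term"; }
+*     public Version getMinimalSupportedVersion() { return Version.V_2_0_0; }
+*     public void writeTo(CodedOutputStream out) throws IOException { ... }
+* }
+* </pre>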
+*
+* @opensearch.internal
+*/
+public interface ProtobufVersionedNamedWriteable extends ProtobufNamedWriteable {
+
+    /**
+     * Returns the name of the writeable object
+     */
+    String getWriteableName();
+
+    /**
+     * The minimal version of the recipient this object can be sent to
+     */
+    Version getMinimalSupportedVersion();
+}
diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java
index cd80e9727e0df..1ed86bf045726 100644
--- a/server/src/main/java/org/opensearch/common/settings/Settings.java
+++ b/server/src/main/java/org/opensearch/common/settings/Settings.java
@@ -32,6 +32,8 @@
 
 package org.opensearch.common.settings;
 
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 import org.apache.logging.log4j.Level;
 import org.opensearch.OpenSearchGenerationException;
 import org.opensearch.OpenSearchParseException;
@@ -575,6 +577,23 @@ public static Settings readSettingsFromStream(StreamInput in) throws IOException
         return builder.build();
     }
 
+    public static Settings readSettingsFromStreamProtobuf(CodedInputStream in) throws IOException {
+        Builder builder = new Builder();
+        int numberOfSettings = in.readInt32();
+        for (int i = 0; i < numberOfSettings; i++) {
+            String key = in.readString();
+            // the protobuf variant of writeSettingsToStream serializes every value as a string,
+            // so there is no null or list case to handle here
+            String value = in.readString();
+            builder.put(key, value);
+        }
+        return builder.build();
+    }
+
     public static void writeSettingsToStream(Settings settings, StreamOutput out) throws IOException {
         // pull settings to exclude secure settings in size()
         Set<Map.Entry<String, Object>> entries = settings.settings.entrySet();
@@ -585,6 +604,16 @@ public static void writeSettingsToStream(Settings settings, StreamOutput out) throws
         }
     }
 
+    public static void writeSettingsToStreamProtobuf(Settings settings, CodedOutputStream out) throws IOException {
+        // pull settings to exclude secure settings in size()
+        Set<Map.Entry<String, Object>> entries = settings.settings.entrySet();
+        out.writeInt32NoTag(entries.size());
+        for (Map.Entry<String, Object> entry : entries) {
+            out.writeStringNoTag(entry.getKey());
+            // values (including lists) are serialized via toString, matching readSettingsFromStreamProtobuf
+            out.writeStringNoTag(entry.getValue().toString());
+        }
+    }
+
     /**
      * Returns a builder to be used in order to build settings.
      */
diff --git a/server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java b/server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java
new file mode 100644
index 0000000000000..97de0d42b5592
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java
@@ -0,0 +1,68 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/
+
+package org.opensearch.common.transport;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+
+import java.io.IOException;
+
+/**
+ * A bound transport address is a tuple of {@link ProtobufTransportAddress}es: the array of addresses
+* the transport is bound to, and the published address that clients should communicate on.
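+* On the wire (see {@code writeTo} below) this is encoded as the number of bound addresses, each
+* bound address in turn, and finally the publish address.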
+* +* @opensearch.internal +*/ +public class ProtobufBoundTransportAddress implements ProtobufWriteable { + + private ProtobufTransportAddress[] boundAddresses; + + private ProtobufTransportAddress publishAddress; + + public ProtobufBoundTransportAddress(CodedInputStream in) throws IOException { + int boundAddressLength = in.readInt32(); + boundAddresses = new ProtobufTransportAddress[boundAddressLength]; + for (int i = 0; i < boundAddressLength; i++) { + boundAddresses[i] = new ProtobufTransportAddress(in); + } + publishAddress = new ProtobufTransportAddress(in); + } + + public ProtobufBoundTransportAddress(ProtobufTransportAddress[] boundAddresses, ProtobufTransportAddress publishAddress) { + if (boundAddresses == null || boundAddresses.length < 1) { + throw new IllegalArgumentException("at least one bound address must be provided"); + } + this.boundAddresses = boundAddresses; + this.publishAddress = publishAddress; + } + + public ProtobufTransportAddress[] boundAddresses() { + return boundAddresses; + } + + public ProtobufTransportAddress publishAddress() { + return publishAddress; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt32NoTag(boundAddresses.length); + for (ProtobufTransportAddress address : boundAddresses) { + address.writeTo(out); + } + publishAddress.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java b/server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java new file mode 100644 index 0000000000000..274c8404e11af --- /dev/null +++ b/server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java @@ -0,0 +1,108 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.common.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.network.NetworkAddress; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; + +/** + * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}). +* +* @opensearch.internal +*/ +public final class ProtobufTransportAddress implements ProtobufWriteable { + + /** + * A non-routeable v4 meta transport address that can be used for + * testing or in scenarios where targets should be marked as non-applicable from a transport perspective. 
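+     * For example (sketch): {@code new ProtobufTransportAddress(ProtobufTransportAddress.META_ADDRESS, 0)}.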
+     */
+    public static final InetAddress META_ADDRESS;
+
+    static {
+        try {
+            META_ADDRESS = InetAddress.getByName("0.0.0.0");
+        } catch (UnknownHostException e) {
+            throw new AssertionError(e);
+        }
+    }
+
+    private final InetSocketAddress address;
+
+    public ProtobufTransportAddress(InetAddress address, int port) {
+        this(new InetSocketAddress(address, port));
+    }
+
+    public ProtobufTransportAddress(InetSocketAddress address) {
+        if (address == null) {
+            throw new IllegalArgumentException("InetSocketAddress must not be null");
+        }
+        if (address.getAddress() == null) {
+            throw new IllegalArgumentException("Address must be resolved but wasn't - InetSocketAddress#getAddress() returned null");
+        }
+        this.address = address;
+    }
+
+    /**
+     * Read from a stream.
+     */
+    public ProtobufTransportAddress(CodedInputStream in) throws IOException {
+        final int len = in.readRawByte();
+        final byte[] a = in.readRawBytes(len); // 4 bytes (IPv4) or 16 bytes (IPv6)
+        String host = in.readString(); // the host string was serialized, so no reverse lookup is needed here
+        final InetAddress inetAddress = InetAddress.getByAddress(host, a);
+        int port = in.readInt32();
+        this.address = new InetSocketAddress(inetAddress, port);
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        byte[] bytes = address.getAddress().getAddress();  // 4 bytes (IPv4) or 16 bytes (IPv6)
+        out.write((byte) bytes.length); // 1 byte
+        out.write(bytes, 0, bytes.length);
+        out.writeStringNoTag(address.getHostString());
+        // don't serialize scope ids over the network!!!!
+        // these only make sense with respect to the local machine, and will only formulate
+        // the address incorrectly remotely.
+        out.writeInt32NoTag(address.getPort());
+    }
+
+    /**
+     * Returns a string representation of the enclosed {@link InetSocketAddress}
+     * @see NetworkAddress#format(InetAddress)
+     */
+    public String getAddress() {
+        return NetworkAddress.format(address.getAddress());
+    }
+
+    /**
+     * Returns the address's port
+     */
+    public int getPort() {
+        return address.getPort();
+    }
+
+    /**
+     * Returns the enclosed {@link InetSocketAddress}
+     */
+    public InetSocketAddress address() {
+        return this.address;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java b/server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java
new file mode 100644
index 0000000000000..201b74d4a83c3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java
@@ -0,0 +1,136 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.common.unit;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+
+import java.io.IOException;
+
+/**
+ * A size value that couples a count with a {@link SizeUnit}, providing conversions between units.
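+* For example (a sketch of the decimal conversions implemented below):
+* <pre>
+* new ProtobufSizeValue(5, SizeUnit.KILO).singles();     // 5000
+* new ProtobufSizeValue(5, SizeUnit.KILO).getMegaFrac(); // 0.005
+* </pre>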
+* +* @opensearch.internal +*/ +public class ProtobufSizeValue implements ProtobufWriteable { + + private final long size; + private final SizeUnit sizeUnit; + + public ProtobufSizeValue(long singles) { + this(singles, SizeUnit.SINGLE); + } + + public ProtobufSizeValue(long size, SizeUnit sizeUnit) { + if (size < 0) { + throw new IllegalArgumentException("size in SizeValue may not be negative"); + } + this.size = size; + this.sizeUnit = sizeUnit; + } + + public ProtobufSizeValue(CodedInputStream in) throws IOException { + size = in.readInt64(); + sizeUnit = SizeUnit.SINGLE; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt64NoTag(singles()); + } + + public long singles() { + return sizeUnit.toSingles(size); + } + + public long getSingles() { + return singles(); + } + + public long kilo() { + return sizeUnit.toKilo(size); + } + + public long getKilo() { + return kilo(); + } + + public long mega() { + return sizeUnit.toMega(size); + } + + public long getMega() { + return mega(); + } + + public long giga() { + return sizeUnit.toGiga(size); + } + + public long getGiga() { + return giga(); + } + + public long tera() { + return sizeUnit.toTera(size); + } + + public long getTera() { + return tera(); + } + + public long peta() { + return sizeUnit.toPeta(size); + } + + public long getPeta() { + return peta(); + } + + public double kiloFrac() { + return ((double) singles()) / SizeUnit.C1; + } + + public double getKiloFrac() { + return kiloFrac(); + } + + public double megaFrac() { + return ((double) singles()) / SizeUnit.C2; + } + + public double getMegaFrac() { + return megaFrac(); + } + + public double gigaFrac() { + return ((double) singles()) / SizeUnit.C3; + } + + public double getGigaFrac() { + return gigaFrac(); + } + + public double teraFrac() { + return ((double) singles()) / SizeUnit.C4; + } + + public double getTeraFrac() { + return teraFrac(); + } + + public double petaFrac() { + return ((double) singles()) / SizeUnit.C5; + } + + public double getPetaFrac() { + return petaFrac(); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 1e1d70f1468dd..924a773b578be 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -31,12 +31,15 @@ package org.opensearch.common.util.concurrent; +import com.google.protobuf.CodedOutputStream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.client.OriginSettingClient; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -99,7 +102,7 @@ * * @opensearch.internal */ -public final class ThreadContext implements Writeable { +public final class ThreadContext implements Writeable, ProtobufWriteable { public static final String PREFIX = "request.headers"; public static final Setting DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope); @@ -185,6 +188,25 @@ public Writeable 
captureAsWriteable() {
         };
     }
 
+    /**
+     * Captures the current thread context as a {@link ProtobufWriteable}, allowing it to be serialized out later
+     */
+    public ProtobufWriteable captureAsProtobufWriteable() {
+        final ThreadContextStruct context = threadLocal.get();
+        return out -> context.writeTo(out, defaultHeader);
+    }
+
     /**
      * Removes the current context and resets a default context marked as
      * originating from the supplied string. The removed context can be
@@ -828,6 +850,25 @@ private void writeTo(StreamOutput out, Map<String, String> defaultHeaders, Map<String, String>
         }
     }
 
+        private void writeTo(CodedOutputStream out, Map<String, String> defaultHeaders) throws IOException {
+            final Map<String, String> requestHeaders;
+            if (defaultHeaders.isEmpty()) {
+                requestHeaders = this.requestHeaders;
+            } else {
+                requestHeaders = new HashMap<>(defaultHeaders);
+                requestHeaders.putAll(this.requestHeaders);
+            }
+
+            out.writeInt32NoTag(requestHeaders.size());
+            for (Map.Entry<String, String> entry : requestHeaders.entrySet()) {
+                out.writeStringNoTag(entry.getKey());
+                out.writeStringNoTag(entry.getValue());
+            }
+
+            ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput();
+            protobufStreamOutput.writeMap(responseHeaders, CodedOutputStream::writeStringNoTag, CodedOutputStream::writeStringNoTag, out);
+        }
     }
 
     /**
@@ -960,4 +1001,9 @@ public Set<Characteristics> characteristics() {
         }
     }
 
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        threadLocal.get().writeTo(out, defaultHeader);
+    }
+
 }
diff --git a/server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java b/server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java
new file mode 100644
index 0000000000000..612c98e7df7e8
--- /dev/null
+++ b/server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java
@@ -0,0 +1,64 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/ + +package org.opensearch.http; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; + +/** + * Information about an http connection +* +* @opensearch.internal +*/ +public class ProtobufHttpInfo implements ProtobufReportingService.ProtobufInfo { + + private final ProtobufBoundTransportAddress address; + private final long maxContentLength; + + public ProtobufHttpInfo(CodedInputStream in) throws IOException { + this(new ProtobufBoundTransportAddress(in), in.readInt64()); + } + + public ProtobufHttpInfo(ProtobufBoundTransportAddress address, long maxContentLength) { + this.address = address; + this.maxContentLength = maxContentLength; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + address.writeTo(out); + out.writeInt64NoTag(maxContentLength); + } + + public ProtobufBoundTransportAddress address() { + return address; + } + + public ProtobufBoundTransportAddress getAddress() { + return address(); + } + + public ByteSizeValue maxContentLength() { + return new ByteSizeValue(maxContentLength); + } + + public ByteSizeValue getMaxContentLength() { + return maxContentLength(); + } +} diff --git a/server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java b/server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java new file mode 100644 index 0000000000000..d7a72cb078219 --- /dev/null +++ b/server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java @@ -0,0 +1,59 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.ingest; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +/** + * Information about an ingest event +* +* @opensearch.internal +*/ +public class ProtobufIngestInfo implements ProtobufReportingService.ProtobufInfo { + + private final Set processors; + + public ProtobufIngestInfo(List processors) { + this.processors = new TreeSet<>(processors); // we use a treeset here to have a test-able / predictable order + } + + /** + * Read from a stream. 
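+     * The wire format, matching {@link #writeTo}, is a count followed by that many serialized processor infos.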
+ */ + public ProtobufIngestInfo(CodedInputStream in) throws IOException { + processors = new TreeSet<>(); + final int size = in.readInt32(); + for (int i = 0; i < size; i++) { + processors.add(new ProtobufProcessorInfo(in)); + } + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt32NoTag(processors.size()); + for (ProtobufProcessorInfo info : processors) { + info.writeTo(out); + } + } + + public Iterable getProcessors() { + return processors; + } + + public boolean containsProcessor(String type) { + return processors.contains(new ProtobufProcessorInfo(type)); + } +} diff --git a/server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java b/server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java new file mode 100644 index 0000000000000..debfcc669921c --- /dev/null +++ b/server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java @@ -0,0 +1,48 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.ingest; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; + +import java.io.IOException; + +/** + * Information about an ingest processor +* +* @opensearch.internal +*/ +public class ProtobufProcessorInfo implements ProtobufWriteable { + + private final String type; + + public ProtobufProcessorInfo(String type) { + this.type = type; + } + + /** + * Read from a stream. + */ + public ProtobufProcessorInfo(CodedInputStream input) throws IOException { + type = input.readString(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeStringNoTag(this.type); + } + + /** + * @return The unique processor type + */ + public String getType() { + return type; + } +} diff --git a/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java new file mode 100644 index 0000000000000..03add10433c54 --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java @@ -0,0 +1,571 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.monitor.jvm; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.apache.lucene.util.Constants; +import org.opensearch.common.Booleans; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.management.ManagementPermission; +import java.lang.management.MemoryMXBean; +import java.lang.management.MemoryPoolMXBean; +import java.lang.management.PlatformManagedObject; +import java.lang.management.RuntimeMXBean; +import java.lang.reflect.Method; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Holds information about the JVM +* +* @opensearch.internal +*/ +public class ProtobufJvmInfo implements ProtobufReportingService.ProtobufInfo { + + private static ProtobufJvmInfo INSTANCE; + + static { + RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); + MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); + + long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit(); + long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax(); + long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit(); + long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ? 
0 : memoryMXBean.getNonHeapMemoryUsage().getMax(); + long directMemoryMax = 0; + try { + Class vmClass = Class.forName("sun.misc.VM"); + directMemoryMax = (Long) vmClass.getMethod("maxDirectMemory").invoke(null); + } catch (Exception t) { + // ignore + } + String[] inputArguments = runtimeMXBean.getInputArguments().toArray(new String[0]); + Mem mem = new Mem(heapInit, heapMax, nonHeapInit, nonHeapMax, directMemoryMax); + + String bootClassPath; + try { + bootClassPath = runtimeMXBean.getBootClassPath(); + } catch (UnsupportedOperationException e) { + // oracle java 9 + bootClassPath = System.getProperty("sun.boot.class.path"); + if (bootClassPath == null) { + // something else + bootClassPath = ""; + } + } + String classPath = runtimeMXBean.getClassPath(); + Map systemProperties = Collections.unmodifiableMap(runtimeMXBean.getSystemProperties()); + + List gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans(); + String[] gcCollectors = new String[gcMxBeans.size()]; + for (int i = 0; i < gcMxBeans.size(); i++) { + GarbageCollectorMXBean gcMxBean = gcMxBeans.get(i); + gcCollectors[i] = gcMxBean.getName(); + } + + List memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans(); + String[] memoryPools = new String[memoryPoolMXBeans.size()]; + for (int i = 0; i < memoryPoolMXBeans.size(); i++) { + MemoryPoolMXBean memoryPoolMXBean = memoryPoolMXBeans.get(i); + memoryPools[i] = memoryPoolMXBean.getName(); + } + + String onError = null; + String onOutOfMemoryError = null; + String useCompressedOops = "unknown"; + String useG1GC = "unknown"; + long g1RegisionSize = -1; + String useSerialGC = "unknown"; + long configuredInitialHeapSize = -1; + long configuredMaxHeapSize = -1; + try { + @SuppressWarnings("unchecked") + Class clazz = (Class) Class.forName( + "com.sun.management.HotSpotDiagnosticMXBean" + ); + Class vmOptionClazz = Class.forName("com.sun.management.VMOption"); + PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz); + Method vmOptionMethod = clazz.getMethod("getVMOption", String.class); + Method valueMethod = vmOptionClazz.getMethod("getValue"); + + try { + Object onErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnError"); + onError = (String) valueMethod.invoke(onErrorObject); + } catch (Exception ignored) {} + + try { + Object onOutOfMemoryErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnOutOfMemoryError"); + onOutOfMemoryError = (String) valueMethod.invoke(onOutOfMemoryErrorObject); + } catch (Exception ignored) {} + + try { + Object useCompressedOopsVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); + useCompressedOops = (String) valueMethod.invoke(useCompressedOopsVmOptionObject); + } catch (Exception ignored) {} + + try { + Object useG1GCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseG1GC"); + useG1GC = (String) valueMethod.invoke(useG1GCVmOptionObject); + Object regionSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "G1HeapRegionSize"); + g1RegisionSize = Long.parseLong((String) valueMethod.invoke(regionSizeVmOptionObject)); + } catch (Exception ignored) {} + + try { + Object initialHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "InitialHeapSize"); + configuredInitialHeapSize = Long.parseLong((String) valueMethod.invoke(initialHeapSizeVmOptionObject)); + } catch (Exception ignored) {} + + try { + Object maxHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "MaxHeapSize"); + 
configuredMaxHeapSize = Long.parseLong((String) valueMethod.invoke(maxHeapSizeVmOptionObject)); + } catch (Exception ignored) {} + + try { + Object useSerialGCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseSerialGC"); + useSerialGC = (String) valueMethod.invoke(useSerialGCVmOptionObject); + } catch (Exception ignored) {} + + } catch (Exception ignored) { + + } + + final boolean bundledJdk = Booleans.parseBoolean(System.getProperty("opensearch.bundled_jdk", Boolean.FALSE.toString())); + final Boolean usingBundledJdk = bundledJdk ? usingBundledJdk() : null; + + INSTANCE = new ProtobufJvmInfo( + JvmPid.getPid(), + System.getProperty("java.version"), + runtimeMXBean.getVmName(), + runtimeMXBean.getVmVersion(), + runtimeMXBean.getVmVendor(), + bundledJdk, + usingBundledJdk, + runtimeMXBean.getStartTime(), + configuredInitialHeapSize, + configuredMaxHeapSize, + mem, + inputArguments, + bootClassPath, + classPath, + systemProperties, + gcCollectors, + memoryPools, + onError, + onOutOfMemoryError, + useCompressedOops, + useG1GC, + useSerialGC, + g1RegisionSize + ); + } + + @SuppressForbidden(reason = "PathUtils#get") + private static boolean usingBundledJdk() { + /* + * We are using the bundled JDK if java.home is the jdk sub-directory of our working directory. This is because we always set + * the working directory of Elasticsearch to home, and the bundled JDK is in the jdk sub-directory there. + */ + final String javaHome = System.getProperty("java.home"); + final String userDir = System.getProperty("user.dir"); + if (Constants.MAC_OS_X) { + return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk.app/Contents/Home").toAbsolutePath()); + } else { + return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk").toAbsolutePath()); + } + } + + public static ProtobufJvmInfo jvmInfo() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new ManagementPermission("monitor")); + sm.checkPropertyAccess("*"); + } + return INSTANCE; + } + + private final long pid; + private final String version; + private final String vmName; + private final String vmVersion; + private final String vmVendor; + private final boolean bundledJdk; + private final Boolean usingBundledJdk; + private final long startTime; + private final long configuredInitialHeapSize; + private final long configuredMaxHeapSize; + private final Mem mem; + private final String[] inputArguments; + private final String bootClassPath; + private final String classPath; + private final Map systemProperties; + private final String[] gcCollectors; + private final String[] memoryPools; + private final String onError; + private final String onOutOfMemoryError; + private final String useCompressedOops; + private final String useG1GC; + private final String useSerialGC; + private final long g1RegionSize; + + private ProtobufJvmInfo( + long pid, + String version, + String vmName, + String vmVersion, + String vmVendor, + boolean bundledJdk, + Boolean usingBundledJdk, + long startTime, + long configuredInitialHeapSize, + long configuredMaxHeapSize, + Mem mem, + String[] inputArguments, + String bootClassPath, + String classPath, + Map systemProperties, + String[] gcCollectors, + String[] memoryPools, + String onError, + String onOutOfMemoryError, + String useCompressedOops, + String useG1GC, + String useSerialGC, + long g1RegionSize + ) { + this.pid = pid; + this.version = version; + this.vmName = vmName; + this.vmVersion = vmVersion; + this.vmVendor = vmVendor; + 
this.bundledJdk = bundledJdk; + this.usingBundledJdk = usingBundledJdk; + this.startTime = startTime; + this.configuredInitialHeapSize = configuredInitialHeapSize; + this.configuredMaxHeapSize = configuredMaxHeapSize; + this.mem = mem; + this.inputArguments = inputArguments; + this.bootClassPath = bootClassPath; + this.classPath = classPath; + this.systemProperties = systemProperties; + this.gcCollectors = gcCollectors; + this.memoryPools = memoryPools; + this.onError = onError; + this.onOutOfMemoryError = onOutOfMemoryError; + this.useCompressedOops = useCompressedOops; + this.useG1GC = useG1GC; + this.useSerialGC = useSerialGC; + this.g1RegionSize = g1RegionSize; + } + + public ProtobufJvmInfo(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + pid = in.readInt64(); + version = in.readString(); + vmName = in.readString(); + vmVersion = in.readString(); + vmVendor = in.readString(); + bundledJdk = in.readBool(); + usingBundledJdk = protobufStreamInput.readOptionalBoolean(in); + startTime = in.readInt64(); + inputArguments = new String[in.readInt32()]; + for (int i = 0; i < inputArguments.length; i++) { + inputArguments[i] = in.readString(); + } + bootClassPath = in.readString(); + classPath = in.readString(); + systemProperties = protobufStreamInput.readMap(CodedInputStream::readString, CodedInputStream::readString, in); + mem = new Mem(in); + gcCollectors = protobufStreamInput.readStringArray(in); + memoryPools = protobufStreamInput.readStringArray(in); + useCompressedOops = in.readString(); + // the following members are only used locally for bootstrap checks, never serialized nor printed out + this.configuredMaxHeapSize = -1; + this.configuredInitialHeapSize = -1; + this.onError = null; + this.onOutOfMemoryError = null; + this.useG1GC = "unknown"; + this.useSerialGC = "unknown"; + this.g1RegionSize = -1; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + out.writeInt64NoTag(pid); + out.writeStringNoTag(version); + out.writeStringNoTag(vmName); + out.writeStringNoTag(vmVersion); + out.writeStringNoTag(vmVendor); + out.writeBoolNoTag(bundledJdk); + protobufStreamOutput.writeOptionalBoolean(usingBundledJdk, out); + out.writeInt64NoTag(startTime); + out.writeInt32NoTag(inputArguments.length); + for (String inputArgument : inputArguments) { + out.writeStringNoTag(inputArgument); + } + out.writeStringNoTag(bootClassPath); + out.writeStringNoTag(classPath); + out.writeInt32NoTag(this.systemProperties.size()); + for (Map.Entry entry : systemProperties.entrySet()) { + out.writeStringNoTag(entry.getKey()); + out.writeStringNoTag(entry.getValue()); + } + mem.writeTo(out); + protobufStreamOutput.writeStringArray(gcCollectors, out); + protobufStreamOutput.writeStringArray(memoryPools, out); + out.writeStringNoTag(useCompressedOops); + } + + /** + * The process id. + */ + public long pid() { + return this.pid; + } + + /** + * The process id. 
+ */ + public long getPid() { + return pid; + } + + public String version() { + return this.version; + } + + public String getVersion() { + return this.version; + } + + public int versionAsInteger() { + try { + int i = 0; + StringBuilder sVersion = new StringBuilder(); + for (; i < version.length(); i++) { + if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') { + break; + } + if (version.charAt(i) != '.') { + sVersion.append(version.charAt(i)); + } + } + if (i == 0) { + return -1; + } + return Integer.parseInt(sVersion.toString()); + } catch (Exception e) { + return -1; + } + } + + public int versionUpdatePack() { + try { + int i = 0; + StringBuilder sVersion = new StringBuilder(); + for (; i < version.length(); i++) { + if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') { + break; + } + if (version.charAt(i) != '.') { + sVersion.append(version.charAt(i)); + } + } + if (i == 0) { + return -1; + } + Integer.parseInt(sVersion.toString()); + int from; + if (version.charAt(i) == '_') { + // 1.7.0_4 + from = ++i; + } else if (version.charAt(i) == '-' && version.charAt(i + 1) == 'u') { + // 1.7.0-u2-b21 + i = i + 2; + from = i; + } else { + return -1; + } + for (; i < version.length(); i++) { + if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') { + break; + } + } + if (from == i) { + return -1; + } + return Integer.parseInt(version.substring(from, i)); + } catch (Exception e) { + return -1; + } + } + + public String getVmName() { + return this.vmName; + } + + public String getVmVersion() { + return this.vmVersion; + } + + public String getVmVendor() { + return this.vmVendor; + } + + public boolean getBundledJdk() { + return bundledJdk; + } + + public Boolean getUsingBundledJdk() { + return usingBundledJdk; + } + + public long getStartTime() { + return this.startTime; + } + + public Mem getMem() { + return this.mem; + } + + public String[] getInputArguments() { + return this.inputArguments; + } + + public String getBootClassPath() { + return this.bootClassPath; + } + + public String getClassPath() { + return this.classPath; + } + + public Map getSystemProperties() { + return this.systemProperties; + } + + public long getConfiguredInitialHeapSize() { + return configuredInitialHeapSize; + } + + public long getConfiguredMaxHeapSize() { + return configuredMaxHeapSize; + } + + public String onError() { + return onError; + } + + public String onOutOfMemoryError() { + return onOutOfMemoryError; + } + + /** + * The value of the JVM flag UseCompressedOops, if available otherwise + * "unknown". The value "unknown" indicates that an attempt was + * made to obtain the value of the flag on this JVM and the attempt + * failed. + * + * @return the value of the JVM flag UseCompressedOops or "unknown" + */ + public String useCompressedOops() { + return this.useCompressedOops; + } + + public String useG1GC() { + return this.useG1GC; + } + + public String useSerialGC() { + return this.useSerialGC; + } + + public long getG1RegionSize() { + return g1RegionSize; + } + + public String[] getGcCollectors() { + return gcCollectors; + } + + public String[] getMemoryPools() { + return memoryPools; + } + + /** + * Memory information. 
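+     * Serialized, in order, as five int64 values: heapInit, heapMax, nonHeapInit, nonHeapMax and directMemoryMax.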
+ * + * @opensearch.internal + */ + public static class Mem implements ProtobufWriteable { + + private final long heapInit; + private final long heapMax; + private final long nonHeapInit; + private final long nonHeapMax; + private final long directMemoryMax; + + public Mem(long heapInit, long heapMax, long nonHeapInit, long nonHeapMax, long directMemoryMax) { + this.heapInit = heapInit; + this.heapMax = heapMax; + this.nonHeapInit = nonHeapInit; + this.nonHeapMax = nonHeapMax; + this.directMemoryMax = directMemoryMax; + } + + public Mem(CodedInputStream in) throws IOException { + this.heapInit = in.readInt64(); + this.heapMax = in.readInt64(); + this.nonHeapInit = in.readInt64(); + this.nonHeapMax = in.readInt64(); + this.directMemoryMax = in.readInt64(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt64NoTag(heapInit); + out.writeInt64NoTag(heapMax); + out.writeInt64NoTag(nonHeapInit); + out.writeInt64NoTag(nonHeapMax); + out.writeInt64NoTag(directMemoryMax); + } + + public ByteSizeValue getHeapInit() { + return new ByteSizeValue(heapInit); + } + + public ByteSizeValue getHeapMax() { + return new ByteSizeValue(heapMax); + } + + public ByteSizeValue getNonHeapInit() { + return new ByteSizeValue(nonHeapInit); + } + + public ByteSizeValue getNonHeapMax() { + return new ByteSizeValue(nonHeapMax); + } + + public ByteSizeValue getDirectMemoryMax() { + return new ByteSizeValue(directMemoryMax); + } + } +} diff --git a/server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java b/server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java new file mode 100644 index 0000000000000..e2ce99b62344f --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java @@ -0,0 +1,103 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.monitor.os; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; + +/** + * Holds Operating System Information +* +* @opensearch.internal +*/ +public class ProtobufOsInfo implements ProtobufReportingService.ProtobufInfo { + + private final long refreshInterval; + private final int availableProcessors; + private final int allocatedProcessors; + private final String name; + private final String prettyName; + private final String arch; + private final String version; + + public ProtobufOsInfo( + final long refreshInterval, + final int availableProcessors, + final int allocatedProcessors, + final String name, + final String prettyName, + final String arch, + final String version + ) { + this.refreshInterval = refreshInterval; + this.availableProcessors = availableProcessors; + this.allocatedProcessors = allocatedProcessors; + this.name = name; + this.prettyName = prettyName; + this.arch = arch; + this.version = version; + } + + public ProtobufOsInfo(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + this.refreshInterval = in.readInt64(); + this.availableProcessors = in.readInt32(); + this.allocatedProcessors = in.readInt32(); + this.name = protobufStreamInput.readOptionalString(in); + this.prettyName = protobufStreamInput.readOptionalString(in); + this.arch = protobufStreamInput.readOptionalString(in); + this.version = protobufStreamInput.readOptionalString(in); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + out.writeInt64NoTag(refreshInterval); + out.writeInt32NoTag(availableProcessors); + out.writeInt32NoTag(allocatedProcessors); + protobufStreamOutput.writeOptionalString(name, out); + protobufStreamOutput.writeOptionalString(prettyName, out); + protobufStreamOutput.writeOptionalString(arch, out); + protobufStreamOutput.writeOptionalString(version, out); + } + + public long getRefreshInterval() { + return this.refreshInterval; + } + + public int getAvailableProcessors() { + return this.availableProcessors; + } + + public int getAllocatedProcessors() { + return this.allocatedProcessors; + } + + public String getName() { + return name; + } + + public String getPrettyName() { + return prettyName; + } + + public String getArch() { + return arch; + } + + public String getVersion() { + return version; + } +} diff --git a/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java b/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java new file mode 100644 index 0000000000000..ea7b1960c0372 --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java @@ -0,0 +1,65 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.monitor.process;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.node.ProtobufReportingService;
+
+import java.io.IOException;
+
+/**
+ * Holds information for monitoring the process
+*
+* @opensearch.internal
+*/
+public class ProtobufProcessInfo implements ProtobufReportingService.ProtobufInfo {
+
+    private final long refreshInterval;
+    private final long id;
+    private final boolean mlockall;
+
+    public ProtobufProcessInfo(long id, boolean mlockall, long refreshInterval) {
+        this.id = id;
+        this.mlockall = mlockall;
+        this.refreshInterval = refreshInterval;
+    }
+
+    public ProtobufProcessInfo(CodedInputStream in) throws IOException {
+        refreshInterval = in.readInt64();
+        id = in.readInt64();
+        mlockall = in.readBool();
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        out.writeInt64NoTag(refreshInterval);
+        out.writeInt64NoTag(id);
+        out.writeBoolNoTag(mlockall);
+    }
+
+    public long refreshInterval() {
+        return this.refreshInterval;
+    }
+
+    public long getRefreshInterval() {
+        return this.refreshInterval;
+    }
+
+    /**
+     * The process id.
+     */
+    public long getId() {
+        return id;
+    }
+
+    public boolean isMlockall() {
+        return mlockall;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/node/ProtobufReportingService.java b/server/src/main/java/org/opensearch/node/ProtobufReportingService.java
new file mode 100644
index 0000000000000..9d9f507ce8cb3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/node/ProtobufReportingService.java
@@ -0,0 +1,29 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.node;
+
+import org.opensearch.common.io.stream.ProtobufWriteable;
+
+/**
+ * Node reporting service
+*
+* @opensearch.internal
+*/
+public interface ProtobufReportingService<I extends ProtobufReportingService.ProtobufInfo> {
+
+    I info();
+
+    /**
+     * Information interface.
+     *
+     * @opensearch.internal
+     */
+    interface ProtobufInfo extends ProtobufWriteable {
+
+    }
+}
diff --git a/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java b/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java
new file mode 100644
index 0000000000000..fcc06a0d11ba3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java
@@ -0,0 +1,350 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.plugins;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.Version;
+import org.opensearch.bootstrap.JarHell;
+import org.opensearch.common.Strings;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Properties;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * An in-memory representation of the plugin descriptor.
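+* A sketch of the matching plugin-descriptor.properties read by {@code readFromProperties} (values
+* illustrative only):
+* <pre>
+* name=example-plugin
+* description=An example plugin
+* version=1.0.0
+* opensearch.version=3.0.0
+* java.version=11
+* classname=org.example.ExamplePlugin
+* </pre>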
+* +* @opensearch.api +*/ +public class ProtobufPluginInfo implements ProtobufWriteable { + + public static final String OPENSEARCH_PLUGIN_PROPERTIES = "plugin-descriptor.properties"; + public static final String OPENSEARCH_PLUGIN_POLICY = "plugin-security.policy"; + + private final String name; + private final String description; + private final String version; + private final Version opensearchVersion; + private final String javaVersion; + private final String classname; + private final String customFolderName; + private final List extendedPlugins; + private final boolean hasNativeController; + + /** + * Construct plugin info. + * + * @param name the name of the plugin + * @param description a description of the plugin + * @param version an opaque version identifier for the plugin + * @param opensearchVersion the version of OpenSearch the plugin was built for + * @param javaVersion the version of Java the plugin was built with + * @param classname the entry point to the plugin + * @param customFolderName the custom folder name for the plugin + * @param extendedPlugins other plugins this plugin extends through SPI + * @param hasNativeController whether or not the plugin has a native controller + */ + public ProtobufPluginInfo( + String name, + String description, + String version, + Version opensearchVersion, + String javaVersion, + String classname, + String customFolderName, + List extendedPlugins, + boolean hasNativeController + ) { + this.name = name; + this.description = description; + this.version = version; + this.opensearchVersion = opensearchVersion; + this.javaVersion = javaVersion; + this.classname = classname; + this.customFolderName = customFolderName; + this.extendedPlugins = Collections.unmodifiableList(extendedPlugins); + this.hasNativeController = hasNativeController; + } + + /** + * Construct plugin info. + * + * @param name the name of the plugin + * @param description a description of the plugin + * @param version an opaque version identifier for the plugin + * @param opensearchVersion the version of OpenSearch the plugin was built for + * @param javaVersion the version of Java the plugin was built with + * @param classname the entry point to the plugin + * @param extendedPlugins other plugins this plugin extends through SPI + * @param hasNativeController whether or not the plugin has a native controller + */ + public ProtobufPluginInfo( + String name, + String description, + String version, + Version opensearchVersion, + String javaVersion, + String classname, + List extendedPlugins, + boolean hasNativeController + ) { + this( + name, + description, + version, + opensearchVersion, + javaVersion, + classname, + null /* customFolderName */, + extendedPlugins, + hasNativeController + ); + } + + /** + * Construct plugin info from a stream. 
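+     * Fields are read in the same order in which {@link #writeTo(CodedOutputStream)} writes them.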
+     *
+     * @param in the stream
+     * @throws IOException if an I/O exception occurred reading the plugin info from the stream
+     */
+    public ProtobufPluginInfo(final CodedInputStream in) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput();
+        this.name = in.readString();
+        this.description = in.readString();
+        this.version = in.readString();
+        this.opensearchVersion = Version.readVersionProtobuf(in);
+        this.javaVersion = in.readString();
+        this.classname = in.readString();
+        this.customFolderName = in.readString();
+        this.extendedPlugins = protobufStreamInput.readList(CodedInputStream::readString, in);
+        this.hasNativeController = in.readBool();
+    }
+
+    @Override
+    public void writeTo(final CodedOutputStream out) throws IOException {
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput();
+        out.writeStringNoTag(name);
+        out.writeStringNoTag(description);
+        out.writeStringNoTag(version);
+        out.writeInt32NoTag(opensearchVersion.id);
+        out.writeStringNoTag(javaVersion);
+        out.writeStringNoTag(classname);
+        if (customFolderName != null) {
+            out.writeStringNoTag(customFolderName);
+        } else {
+            out.writeStringNoTag(name);
+        }
+        protobufStreamOutput.writeCollection(extendedPlugins, CodedOutputStream::writeStringNoTag, out);
+        out.writeBoolNoTag(hasNativeController);
+    }
+
+    /**
+     * Reads the plugin descriptor file.
+     *
+     * @param path the path to the root directory for the plugin
+     * @return the plugin info
+     * @throws IOException if an I/O exception occurred reading the plugin descriptor
+     */
+    public static ProtobufPluginInfo readFromProperties(final Path path) throws IOException {
+        final Path descriptor = path.resolve(OPENSEARCH_PLUGIN_PROPERTIES);
+
+        final Map<String, String> propsMap;
+        {
+            final Properties props = new Properties();
+            try (InputStream stream = Files.newInputStream(descriptor)) {
+                props.load(stream);
+            }
+            propsMap = props.stringPropertyNames().stream().collect(Collectors.toMap(Function.identity(), props::getProperty));
+        }
+
+        final String name = propsMap.remove("name");
+        if (name == null || name.isEmpty()) {
+            throw new IllegalArgumentException("property [name] is missing in [" + descriptor + "]");
+        }
+        final String description = propsMap.remove("description");
+        if (description == null) {
+            throw new IllegalArgumentException("property [description] is missing for plugin [" + name + "]");
+        }
+        final String version = propsMap.remove("version");
+        if (version == null) {
+            throw new IllegalArgumentException("property [version] is missing for plugin [" + name + "]");
+        }
+
+        final String opensearchVersionString = propsMap.remove("opensearch.version");
+        if (opensearchVersionString == null) {
+            throw new IllegalArgumentException("property [opensearch.version] is missing for plugin [" + name + "]");
+        }
+        final Version opensearchVersion = Version.fromString(opensearchVersionString);
+        final String javaVersionString = propsMap.remove("java.version");
+        if (javaVersionString == null) {
+            throw new IllegalArgumentException("property [java.version] is missing for plugin [" + name + "]");
+        }
+        JarHell.checkVersionFormat(javaVersionString);
+        final String classname = propsMap.remove("classname");
+        if (classname == null) {
+            throw new IllegalArgumentException("property [classname] is missing for plugin [" + name + "]");
+        }
+
+        final String customFolderNameValue = propsMap.remove("custom.foldername");
+        final String customFolderName = customFolderNameValue;
+        final String extendedString = propsMap.remove("extended.plugins");
+        final List<String> extendedPlugins;
+        if (extendedString == null) {
+            extendedPlugins = Collections.emptyList();
+        } else {
+            extendedPlugins = Arrays.asList(Strings.delimitedListToStringArray(extendedString, ","));
+        }
+
+        final String hasNativeControllerValue = propsMap.remove("has.native.controller");
+        final boolean hasNativeController;
+        if (hasNativeControllerValue == null) {
+            hasNativeController = false;
+        } else {
+            switch (hasNativeControllerValue) {
+                case "true":
+                    hasNativeController = true;
+                    break;
+                case "false":
+                    hasNativeController = false;
+                    break;
+                default:
+                    final String message = String.format(
+                        Locale.ROOT,
+                        "property [%s] must be [%s], [%s], or unspecified but was [%s]",
+                        "has_native_controller",
+                        "true",
+                        "false",
+                        hasNativeControllerValue
+                    );
+                    throw new IllegalArgumentException(message);
+            }
+        }
+
+        if (propsMap.isEmpty() == false) {
+            throw new IllegalArgumentException("Unknown properties in plugin descriptor: " + propsMap.keySet());
+        }
+
+        return new ProtobufPluginInfo(
+            name,
+            description,
+            version,
+            opensearchVersion,
+            javaVersionString,
+            classname,
+            customFolderName,
+            extendedPlugins,
+            hasNativeController
+        );
+    }
+
+    /**
+     * The name of the plugin.
+     *
+     * @return the plugin name
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * The description of the plugin.
+     *
+     * @return the plugin description
+     */
+    public String getDescription() {
+        return description;
+    }
+
+    /**
+     * The entry point to the plugin.
+     *
+     * @return the entry point to the plugin
+     */
+    public String getClassname() {
+        return classname;
+    }
+
+    /**
+     * The custom folder name for the plugin.
+     *
+     * @return the custom folder name for the plugin
+     */
+    public String getFolderName() {
+        return customFolderName;
+    }
+
+    /**
+     * Other plugins this plugin extends through SPI.
+     *
+     * @return the names of the plugins extended
+     */
+    public List<String> getExtendedPlugins() {
+        return extendedPlugins;
+    }
+
+    /**
+     * The version of the plugin.
+     *
+     * @return the version
+     */
+    public String getVersion() {
+        return version;
+    }
+
+    /**
+     * The version of OpenSearch the plugin was built for.
+     *
+     * @return an OpenSearch version
+     */
+    public Version getOpenSearchVersion() {
+        return opensearchVersion;
+    }
+
+    /**
+     * The version of Java the plugin was built with.
+     *
+     * @return a java version string
+     */
+    public String getJavaVersion() {
+        return javaVersion;
+    }
+
+    /**
+     * Whether or not the plugin has a native controller.
+     *
+     * @return {@code true} if the plugin has a native controller
+     */
+    public boolean hasNativeController() {
+        return hasNativeController;
+    }
+
+    /**
+     * The target folder name for the plugin.
+     *
+     * @return the custom folder name if one is specified, else the plugin name
+     */
+    public String getTargetFolderName() {
+        return (this.customFolderName == null || this.customFolderName.isEmpty()) ? this.name : this.customFolderName;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java
index e18a594236fc8..00b9b2fa1fec5 100644
--- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java
+++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java
@@ -38,6 +38,7 @@
 import org.apache.lucene.util.CollectionUtil;
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
+import org.opensearch.action.support.clustermanager.ProtobufClusterManagerNodeRequest;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.common.CheckedConsumer;
 import org.opensearch.common.collect.Tuple;
@@ -254,6 +255,34 @@ public static void parseDeprecatedMasterTimeoutParameter(
         }
     }
 
+    /**
+     * Parse the deprecated request parameter 'master_timeout', and log a deprecation notice if the parameter is used.
+     * It also validates that the two parameters 'master_timeout' and 'cluster_manager_timeout' are not supplied together.
+     * The method is temporarily added in 2.0 during the adoption of inclusive language. Remove the method along with MASTER_ROLE.
+     * @param mnr the action request
+     * @param request the REST request to handle
+     * @param logger the logger that logs deprecation notices
+     * @param logMsgKeyPrefix the key prefix of a deprecation message to avoid duplicate messages.
+     */
+    public static void parseDeprecatedMasterTimeoutParameterProtobuf(
+        ProtobufClusterManagerNodeRequest mnr,
+        RestRequest request,
+        DeprecationLogger logger,
+        String logMsgKeyPrefix
+    ) {
+        final String MASTER_TIMEOUT_DEPRECATED_MESSAGE =
+            "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead.";
+        final String DUPLICATE_PARAMETER_ERROR_MESSAGE =
+            "Please only use one of the request parameters [master_timeout, cluster_manager_timeout].";
+        if (request.hasParam("master_timeout")) {
+            logger.deprecate(logMsgKeyPrefix + "_master_timeout_parameter", MASTER_TIMEOUT_DEPRECATED_MESSAGE);
+            if (request.hasParam("cluster_manager_timeout")) {
+                throw new OpenSearchParseException(DUPLICATE_PARAMETER_ERROR_MESSAGE);
+            }
+            mnr.clusterManagerNodeTimeout(request.paramAsTime("master_timeout", mnr.clusterManagerNodeTimeout()));
+        }
+    }
+
     /**
      * A wrapper for the base handler.
     *
diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java
index b54c8955283a2..19967097e78b9 100644
--- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java
@@ -35,11 +35,16 @@
 import org.opensearch.action.admin.cluster.node.info.NodeInfo;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.opensearch.action.admin.cluster.node.info.ProtobufNodeInfo;
+import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest;
+import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse;
 import org.opensearch.action.admin.cluster.node.stats.NodeStats;
 import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest;
 import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
 import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
+import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest;
+import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodeRole;
@@ -65,6 +70,7 @@
 import org.opensearch.monitor.fs.FsInfo;
 import org.opensearch.monitor.jvm.JvmInfo;
 import org.opensearch.monitor.jvm.JvmStats;
+import org.opensearch.monitor.jvm.ProtobufJvmInfo;
 import org.opensearch.monitor.os.OsStats;
 import org.opensearch.monitor.process.ProcessInfo;
 import org.opensearch.monitor.process.ProcessStats;
@@ -109,7 +115,7 @@ protected void documentation(StringBuilder sb) {
 
     @Override
     public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
-        final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+        final ProtobufClusterStateRequest clusterStateRequest = new ProtobufClusterStateRequest();
         clusterStateRequest.clear().nodes(true);
         if (request.hasParam("local")) {
             deprecationLogger.deprecate("cat_nodes_local_parameter", LOCAL_DEPRECATED_MESSAGE);
@@ -118,23 +124,23 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli
         clusterStateRequest.clusterManagerNodeTimeout(
             request.paramAsTime("cluster_manager_timeout", clusterStateRequest.clusterManagerNodeTimeout())
         );
-        parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName());
+        parseDeprecatedMasterTimeoutParameterProtobuf(clusterStateRequest, request, deprecationLogger, getName());
         final boolean fullId = request.paramAsBoolean("full_id", false);
-        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ProtobufClusterStateResponse>(channel) {
             @Override
-            public void processResponse(final ClusterStateResponse clusterStateResponse) {
-                NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
+            public void processResponse(final ProtobufClusterStateResponse clusterStateResponse) {
+                ProtobufNodesInfoRequest nodesInfoRequest = new ProtobufNodesInfoRequest();
                 nodesInfoRequest.timeout(request.param("timeout"));
                 nodesInfoRequest.clear()
                     .addMetrics(
-                        NodesInfoRequest.Metric.JVM.metricName(),
-                        NodesInfoRequest.Metric.OS.metricName(),
-                        NodesInfoRequest.Metric.PROCESS.metricName(),
-                        NodesInfoRequest.Metric.HTTP.metricName()
+                        ProtobufNodesInfoRequest.Metric.JVM.metricName(),
+                        ProtobufNodesInfoRequest.Metric.OS.metricName(),
+                        ProtobufNodesInfoRequest.Metric.PROCESS.metricName(),
+                        ProtobufNodesInfoRequest.Metric.HTTP.metricName()
                     );
-                client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener<NodesInfoResponse>(channel) {
+                client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener<ProtobufNodesInfoResponse>(channel) {
                     @Override
-                    public void processResponse(final NodesInfoResponse nodesInfoResponse) {
+                    public void processResponse(final ProtobufNodesInfoResponse nodesInfoResponse) {
                         NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
                         nodesStatsRequest.timeout(request.param("timeout"));
                         nodesStatsRequest.clear()
@@ -350,8 +356,8 @@ protected Table getTableWithHeader(final RestRequest request) {
     Table buildTable(
         boolean fullId,
         RestRequest req,
-        ClusterStateResponse state,
-        NodesInfoResponse nodesInfo,
+        ProtobufClusterStateResponse state,
+        ProtobufNodesInfoResponse nodesInfo,
         NodesStatsResponse nodesStats
     ) {
 
@@ -360,10 +366,10 @@ Table buildTable(
         Table table = getTableWithHeader(req);
 
         for (DiscoveryNode node : nodes) {
-            NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
+            ProtobufNodeInfo info = nodesInfo.getNodesMap().get(node.getId());
             NodeStats stats = nodesStats.getNodesMap().get(node.getId());
 
-            JvmInfo jvmInfo = info == null ? null : info.getInfo(JvmInfo.class);
+            ProtobufJvmInfo jvmInfo = info == null ? null : info.getInfo(ProtobufJvmInfo.class);
             JvmStats jvmStats = stats == null ? null : stats.getJvm();
             FsInfo fsInfo = stats == null ? null : stats.getFs();
             OsStats osStats = stats == null ? null : stats.getOs();
diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java b/server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java
new file mode 100644
index 0000000000000..cb90457671506
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java
@@ -0,0 +1,71 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.search.aggregations.support;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.node.ProtobufReportingService;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.LongAdder;
+
+/**
+ * Data describing an agg
+*
+* @opensearch.internal
+*/
+public class ProtobufAggregationInfo implements ProtobufReportingService.ProtobufInfo {
+
+    private final Map<String, Set<String>> aggs;
+
+    ProtobufAggregationInfo(Map<String, Map<String, LongAdder>> aggs) {
+        // we use a treemap/treeset here to have a test-able / predictable order
+        Map<String, Set<String>> aggsMap = new TreeMap<>();
+        aggs.forEach((s, m) -> aggsMap.put(s, Collections.unmodifiableSet(new TreeSet<>(m.keySet()))));
+        this.aggs = Collections.unmodifiableMap(aggsMap);
+    }
+
+    /**
+     * Read from a stream.
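+     * <p>
+     * The expected wire layout, written without tags: an int32 count of aggregation names, then for each
+     * name a string key, an int32 count of value-source types, and that many type strings. A hand-rolled
+     * stream this constructor can read (the names below are made up for illustration):
+     * <pre>
+     * byte[] buf = new byte[64];
+     * CodedOutputStream out = CodedOutputStream.newInstance(buf);
+     * out.writeInt32NoTag(1);        // one aggregation
+     * out.writeStringNoTag("terms"); // hypothetical aggregation name
+     * out.writeInt32NoTag(1);        // one supported value-source type
+     * out.writeStringNoTag("bytes");
+     * out.flush();
+     * ProtobufAggregationInfo info = new ProtobufAggregationInfo(
+     *     CodedInputStream.newInstance(buf, 0, out.getTotalBytesWritten()));
+     * </pre>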
+     */
+    public ProtobufAggregationInfo(CodedInputStream in) throws IOException {
+        aggs = new TreeMap<>();
+        final int size = in.readInt32();
+        for (int i = 0; i < size; i++) {
+            String key = in.readString();
+            final int keys = in.readInt32();
+            final Set<String> types = new TreeSet<>();
+            for (int j = 0; j < keys; j++) {
+                types.add(in.readString());
+            }
+            aggs.put(key, types);
+        }
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        out.writeInt32NoTag(aggs.size());
+        for (Map.Entry<String, Set<String>> e : aggs.entrySet()) {
+            out.writeStringNoTag(e.getKey());
+            out.writeInt32NoTag(e.getValue().size());
+            for (String type : e.getValue()) {
+                out.writeStringNoTag(type);
+            }
+        }
+    }
+
+    public Map<String, Set<String>> getAggregations() {
+        return aggs;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java b/server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java
new file mode 100644
index 0000000000000..245eaa31b7bcc
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java
@@ -0,0 +1,56 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.search.pipeline;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.ingest.ProtobufProcessorInfo;
+import org.opensearch.node.ProtobufReportingService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * Information about a search pipelines event
+*
+* @opensearch.internal
+*/
+public class ProtobufSearchPipelineInfo implements ProtobufReportingService.ProtobufInfo {
+
+    private final Set<ProtobufProcessorInfo> processors;
+
+    public ProtobufSearchPipelineInfo(List<ProtobufProcessorInfo> processors) {
+        this.processors = new TreeSet<>(processors); // we use a treeset here to have a test-able / predictable order
+    }
+
+    /**
+     * Read from a stream.
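+     * <p>
+     * The layout mirrors {@link #writeTo(CodedOutputStream)}: an untagged int32 processor count, followed
+     * by that many serialized {@code ProtobufProcessorInfo} entries.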
+ */ + public ProtobufSearchPipelineInfo(CodedInputStream in) throws IOException { + processors = new TreeSet<>(); + final int size = in.readInt32(); + for (int i = 0; i < size; i++) { + processors.add(new ProtobufProcessorInfo(in)); + } + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt32NoTag(processors.size()); + for (ProtobufProcessorInfo info : processors) { + info.writeTo(out); + } + } + + public boolean containsProcessor(String type) { + return processors.contains(new ProtobufProcessorInfo(type)); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java index 7c7ee83adb679..21165cb7736ca 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java @@ -14,9 +14,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ProtobufActionResponse; import org.opensearch.action.NotifyOnceListener; -import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; @@ -385,11 +385,11 @@ public String getHeader(String header) { return headers.get(header); } - public ProtobufTaskResult result(DiscoveryNode node, Exception error) throws IOException { + public ProtobufTaskResult result(ProtobufDiscoveryNode node, Exception error) throws IOException { return new ProtobufTaskResult(taskInfo(node.getId(), true, true), error); } - public ProtobufTaskResult result(DiscoveryNode node, ActionResponse response) throws IOException { + public ProtobufTaskResult result(ProtobufDiscoveryNode node, ProtobufActionResponse response) throws IOException { if (response instanceof ToXContent) { return new ProtobufTaskResult(taskInfo(node.getId(), true, true), (ToXContent) response); } else { diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java new file mode 100644 index 0000000000000..7236fa2ebdc2f --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java @@ -0,0 +1,236 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.tasks; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchSecurityException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.ChannelActionListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.EmptyTransportResponseHandler; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; + +/** + * Service used to cancel a task +* +* @opensearch.internal +*/ +public class ProtobufTaskCancellationService { + public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban"; + private static final Logger logger = LogManager.getLogger(ProtobufTaskCancellationService.class); + private final TransportService transportService; + private final ProtobufTaskManager taskManager; + + public ProtobufTaskCancellationService(TransportService transportService) { + this.transportService = transportService; + this.taskManager = transportService.getTaskManager(); + transportService.registerRequestHandler( + BAN_PARENT_ACTION_NAME, + ThreadPool.Names.SAME, + BanParentTaskRequest::new, + new BanParentRequestHandler() + ); + } + + private String localNodeId() { + return transportService.getLocalNode().getId(); + } + + void cancelTaskAndDescendants(CancellableTask task, String reason, boolean waitForCompletion, ActionListener listener) { + final TaskId taskId = task.taskInfo(localNodeId(), false).getTaskId(); + if (task.shouldCancelChildrenOnCancellation()) { + logger.trace("cancelling task [{}] and its descendants", taskId); + StepListener completedListener = new StepListener<>(); + GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.map(completedListener, r -> null), 3); + Collection childrenNodes = taskManager.startBanOnChildrenNodes(task.getId(), () -> { + logger.trace("child tasks of parent [{}] are completed", taskId); + groupedListener.onResponse(null); + }); + taskManager.cancel(task, reason, () -> { + logger.trace("task [{}] is cancelled", taskId); + groupedListener.onResponse(null); + }); + StepListener banOnNodesListener = new StepListener<>(); + setBanOnNodes(reason, waitForCompletion, task, childrenNodes, banOnNodesListener); + banOnNodesListener.whenComplete(groupedListener::onResponse, groupedListener::onFailure); + // If we start unbanning when the last child task completed and that child task executed with a specific user, then unban + // requests are denied because internal requests can't run with a user. We need to remove bans with the current thread context. + final Runnable removeBansRunnable = transportService.getThreadPool() + .getThreadContext() + .preserveContext(() -> removeBanOnNodes(task, childrenNodes)); + // We remove bans after all child tasks are completed although in theory we can do it on a per-node basis. 
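+            // Note: groupedListener above was sized at three; it completes only once the local cancellation
+            // callback, the children-completed callback, and the ban placement below have all fired.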
+ completedListener.whenComplete(r -> removeBansRunnable.run(), e -> removeBansRunnable.run()); + // if wait_for_completion is true, then only return when (1) bans are placed on child nodes, (2) child tasks are + // completed or failed, (3) the main task is cancelled. Otherwise, return after bans are placed on child nodes. + if (waitForCompletion) { + completedListener.whenComplete(r -> listener.onResponse(null), listener::onFailure); + } else { + banOnNodesListener.whenComplete(r -> listener.onResponse(null), listener::onFailure); + } + } else { + logger.trace("task [{}] doesn't have any children that should be cancelled", taskId); + if (waitForCompletion) { + taskManager.cancel(task, reason, () -> listener.onResponse(null)); + } else { + taskManager.cancel(task, reason, () -> {}); + listener.onResponse(null); + } + } + } + + private void setBanOnNodes( + String reason, + boolean waitForCompletion, + CancellableTask task, + Collection childNodes, + ActionListener listener + ) { + if (childNodes.isEmpty()) { + listener.onResponse(null); + return; + } + final TaskId taskId = new TaskId(localNodeId(), task.getId()); + logger.trace("cancelling child tasks of [{}] on child nodes {}", taskId, childNodes); + GroupedActionListener groupedListener = new GroupedActionListener<>( + ActionListener.map(listener, r -> null), + childNodes.size() + ); + final BanParentTaskRequest banRequest = BanParentTaskRequest.createSetBanParentTaskRequest(taskId, reason, waitForCompletion); + for (DiscoveryNode node : childNodes) { + transportService.sendRequest( + node, + BAN_PARENT_ACTION_NAME, + banRequest, + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty response) { + logger.trace("sent ban for tasks with the parent [{}] to the node [{}]", taskId, node); + groupedListener.onResponse(null); + } + + @Override + public void handleException(TransportException exp) { + assert ExceptionsHelper.unwrapCause(exp) instanceof OpenSearchSecurityException == false; + logger.warn("Cannot send ban for tasks with the parent [{}] to the node [{}]", taskId, node); + groupedListener.onFailure(exp); + } + } + ); + } + } + + private void removeBanOnNodes(CancellableTask task, Collection childNodes) { + final BanParentTaskRequest request = BanParentTaskRequest.createRemoveBanParentTaskRequest(new TaskId(localNodeId(), task.getId())); + for (DiscoveryNode node : childNodes) { + logger.trace("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node); + transportService.sendRequest(node, BAN_PARENT_ACTION_NAME, request, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleException(TransportException exp) { + assert ExceptionsHelper.unwrapCause(exp) instanceof OpenSearchSecurityException == false; + logger.info("failed to remove the parent ban for task {} on node {}", request.parentTaskId, node); + } + }); + } + } + + private static class BanParentTaskRequest extends TransportRequest { + + private final TaskId parentTaskId; + private final boolean ban; + private final boolean waitForCompletion; + private final String reason; + + static BanParentTaskRequest createSetBanParentTaskRequest(TaskId parentTaskId, String reason, boolean waitForCompletion) { + return new BanParentTaskRequest(parentTaskId, reason, waitForCompletion); + } + + static BanParentTaskRequest createRemoveBanParentTaskRequest(TaskId parentTaskId) { + return new BanParentTaskRequest(parentTaskId); + } + + private 
BanParentTaskRequest(TaskId parentTaskId, String reason, boolean waitForCompletion) { + this.parentTaskId = parentTaskId; + this.ban = true; + this.reason = reason; + this.waitForCompletion = waitForCompletion; + } + + private BanParentTaskRequest(TaskId parentTaskId) { + this.parentTaskId = parentTaskId; + this.ban = false; + this.reason = null; + this.waitForCompletion = false; + } + + private BanParentTaskRequest(StreamInput in) throws IOException { + super(in); + parentTaskId = TaskId.readFromStream(in); + ban = in.readBoolean(); + reason = ban ? in.readString() : null; + waitForCompletion = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + parentTaskId.writeTo(out); + out.writeBoolean(ban); + if (ban) { + out.writeString(reason); + } + out.writeBoolean(waitForCompletion); + } + } + + private class BanParentRequestHandler implements TransportRequestHandler { + @Override + public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel, Task task) throws Exception { + if (request.ban) { + logger.debug( + "Received ban for the parent [{}] on the node [{}], reason: [{}]", + request.parentTaskId, + localNodeId(), + request.reason + ); + final List childTasks = taskManager.setBan(request.parentTaskId, request.reason); + final GroupedActionListener listener = new GroupedActionListener<>( + ActionListener.map( + new ChannelActionListener<>(channel, BAN_PARENT_ACTION_NAME, request), + r -> TransportResponse.Empty.INSTANCE + ), + childTasks.size() + 1 + ); + for (CancellableTask childTask : childTasks) { + cancelTaskAndDescendants(childTask, request.reason, request.waitForCompletion, listener); + } + listener.onResponse(null); + } else { + logger.debug("Removing ban for the parent [{}] on the node [{}]", request.parentTaskId, localNodeId()); + taskManager.removeBan(request.parentTaskId); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java new file mode 100644 index 0000000000000..55f9213214ca5 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java @@ -0,0 +1,752 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.tasks; + +import com.carrotsearch.hppc.ObjectIntHashMap; +import com.carrotsearch.hppc.ObjectIntMap; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.core.Assertions; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.NotifyOnceListener; +import org.opensearch.cluster.ProtobufClusterChangedEvent; +import org.opensearch.cluster.ProtobufClusterStateApplier; +import org.opensearch.cluster.ClusterStateApplier; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNodes; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.SetOnce; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.transport.TcpChannel; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static org.opensearch.common.unit.TimeValue.timeValueMillis; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; + +/** + * ProtobufTask Manager service for keeping track of currently running tasks on the nodes +* +* @opensearch.internal +*/ +public class ProtobufTaskManager implements ProtobufClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(ProtobufTaskManager.class); + + private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); + + public static final String TASK_RESOURCE_CONSUMERS_ATTRIBUTES = "task_resource_consumers.enabled"; + + public static final Setting TASK_RESOURCE_CONSUMERS_ENABLED = Setting.boolSetting( + TASK_RESOURCE_CONSUMERS_ATTRIBUTES, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Rest headers that are copied to the task + */ + private final List taskHeaders; + private final ProtobufThreadPool threadPool; + + private final ConcurrentMapLong tasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + + private final ConcurrentMapLong cancellableTasks = ConcurrentCollections + .newConcurrentMapLongWithAggressiveConcurrency(); + + private final AtomicLong 
taskIdGenerator = new AtomicLong(); + + private final Map banedParents = new ConcurrentHashMap<>(); + + private TaskResultsService taskResultsService; + private final SetOnce taskResourceTrackingService = new SetOnce<>(); + + private volatile ProtobufDiscoveryNodes lastDiscoveryNodes = ProtobufDiscoveryNodes.EMPTY_NODES; + + private final ByteSizeValue maxHeaderSize; + private final Map channelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); + private final SetOnce cancellationService = new SetOnce<>(); + + private volatile boolean taskResourceConsumersEnabled; + private final Set> taskResourceConsumer; + + public static ProtobufTaskManager createTaskManagerWithClusterSettings( + Settings settings, + ClusterSettings clusterSettings, + ProtobufThreadPool threadPool, + Set taskHeaders + ) { + final ProtobufTaskManager taskManager = new ProtobufTaskManager(settings, threadPool, taskHeaders); + clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_CONSUMERS_ENABLED, taskManager::setTaskResourceConsumersEnabled); + return taskManager; + } + + public ProtobufTaskManager(Settings settings, ProtobufThreadPool threadPool, Set taskHeaders) { + this.threadPool = threadPool; + this.taskHeaders = new ArrayList<>(taskHeaders); + this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + this.taskResourceConsumersEnabled = TASK_RESOURCE_CONSUMERS_ENABLED.get(settings); + taskResourceConsumer = new HashSet<>(); + } + + public void registerTaskResourceConsumer(Consumer consumer) { + taskResourceConsumer.add(consumer); + } + + public void setTaskResultsService(TaskResultsService taskResultsService) { + assert this.taskResultsService == null; + this.taskResultsService = taskResultsService; + } + + public void setTaskCancellationService(TaskCancellationService taskCancellationService) { + this.cancellationService.set(taskCancellationService); + } + + public void setTaskResourceTrackingService(ProtobufTaskResourceTrackingService taskResourceTrackingService) { + this.taskResourceTrackingService.set(taskResourceTrackingService); + } + + public void setTaskResourceConsumersEnabled(boolean taskResourceConsumersEnabled) { + this.taskResourceConsumersEnabled = taskResourceConsumersEnabled; + } + + /** + * Registers a task without parent task + */ + public ProtobufTask register(String type, String action, ProtobufTaskAwareRequest request) { + Map headers = new HashMap<>(); + long headerSize = 0; + long maxSize = maxHeaderSize.getBytes(); + ThreadContext threadContext = threadPool.getThreadContext(); + for (String key : taskHeaders) { + String httpHeader = threadContext.getHeader(key); + if (httpHeader != null) { + headerSize += key.length() * 2 + httpHeader.length() * 2; + if (headerSize > maxSize) { + throw new IllegalArgumentException("Request exceeded the maximum size of task headers " + maxHeaderSize); + } + headers.put(key, httpHeader); + } + } + ProtobufTask task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask(), headers); + Objects.requireNonNull(task); + assert task.getParentTaskId().equals(request.getParentTask()) : "Request [ " + request + "] didn't preserve it parentTaskId"; + if (logger.isTraceEnabled()) { + logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription()); + } + + if (task.supportsResourceTracking()) { + boolean success = task.addResourceTrackingCompletionListener(new NotifyOnceListener<>() { + @Override + protected void innerOnResponse(ProtobufTask task) { + // Stop tracking the task once 
the last thread has been marked inactive. + if (taskResourceTrackingService.get() != null && task.supportsResourceTracking()) { + taskResourceTrackingService.get().stopTracking(task); + } + } + + @Override + protected void innerOnFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + }); + + if (success == false) { + logger.debug( + "failed to register a completion listener as task resource tracking has already completed [taskId={}]", + task.getId() + ); + } + } + + if (task instanceof ProtobufCancellableTask) { + registerCancellableTask(task); + } else { + ProtobufTask previousTask = tasks.put(task.getId(), task); + assert previousTask == null; + } + return task; + } + + private void registerCancellableTask(ProtobufTask task) { + ProtobufCancellableTask cancellableTask = (ProtobufCancellableTask) task; + CancellableTaskHolder holder = new CancellableTaskHolder(cancellableTask); + CancellableTaskHolder oldHolder = cancellableTasks.put(task.getId(), holder); + assert oldHolder == null; + // Check if this task was banned before we start it. The empty check is used to avoid + // computing the hash code of the parent taskId as most of the time banedParents is empty. + if (task.getParentTaskId().isSet() && banedParents.isEmpty() == false) { + String reason = banedParents.get(task.getParentTaskId()); + if (reason != null) { + try { + holder.cancel(reason); + throw new TaskCancelledException("ProtobufTask cancelled before it started: " + reason); + } finally { + // let's clean up the registration + unregister(task); + } + } + } + } + + /** + * Cancels a task + *
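+     * <p>
+     * For example, {@code cancel(task, "shutting down", () -> logger.trace("cancelled"))} asks the task to
+     * cancel cooperatively; the callback runs once the cancellation has been fully processed.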

+ * After starting cancellation on the parent task, the task manager tries to cancel all children tasks + * of the current task. Once cancellation of the children tasks is done, the listener is triggered. + * If the task is completed or unregistered from ProtobufTaskManager, then the listener is called immediately. + */ + public void cancel(ProtobufCancellableTask task, String reason, Runnable listener) { + CancellableTaskHolder holder = cancellableTasks.get(task.getId()); + if (holder != null) { + logger.trace("cancelling task with id {}", task.getId()); + holder.cancel(reason, listener); + } else { + listener.run(); + } + } + + /** + * Unregister the task + */ + public ProtobufTask unregister(ProtobufTask task) { + logger.trace("unregister task for id: {}", task.getId()); + + // Decrement the task's self-thread as part of unregistration. + task.decrementResourceTrackingThreads(); + + if (taskResourceConsumersEnabled) { + for (Consumer taskConsumer : taskResourceConsumer) { + try { + taskConsumer.accept(task); + } catch (Exception e) { + logger.error("error encountered when updating the consumer", e); + } + } + } + + if (task instanceof ProtobufCancellableTask) { + CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); + if (holder != null) { + holder.finish(); + return holder.getTask(); + } else { + return null; + } + } else { + return tasks.remove(task.getId()); + } + } + + /** + * Register a node on which a child task will execute. The returned {@link Releasable} must be called + * to unregister the child node once the child task is completed or failed. + */ + public Releasable registerChildNode(long taskId, ProtobufDiscoveryNode node) { + final CancellableTaskHolder holder = cancellableTasks.get(taskId); + if (holder != null) { + logger.trace("register child node [{}] task [{}]", node, taskId); + holder.registerChildNode(node); + return Releasables.releaseOnce(() -> { + logger.trace("unregister child node [{}] task [{}]", node, taskId); + holder.unregisterChildNode(node); + }); + } + return () -> {}; + } + + public ProtobufDiscoveryNode localNode() { + return lastDiscoveryNodes.getLocalNode(); + } + + /** + * Stores the task failure + */ + public void storeResult(ProtobufTask task, Exception error, ActionListener listener) { + ProtobufDiscoveryNode localNode = lastDiscoveryNodes.getLocalNode(); + if (localNode == null) { + // too early to store anything, shouldn't really be here - just pass the error along + listener.onFailure(error); + return; + } + final ProtobufTaskResult taskResult; + try { + taskResult = task.result(localNode, error); + } catch (IOException ex) { + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); + listener.onFailure(ex); + return; + } + taskResultsService.storeResult(taskResult, new ActionListener() { + @Override + public void onResponse(Void aVoid) { + listener.onFailure(error); + } + + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); + listener.onFailure(e); + } + }); + } + + /** + * Stores the task result + */ + public void storeResult(ProtobufTask task, Response response, ActionListener listener) { + ProtobufDiscoveryNode localNode = lastDiscoveryNodes.getLocalNode(); + if (localNode == null) { + // too early to store anything, shouldn't really be here - just pass the response along + logger.warn("couldn't store response {}, the node didn't join the cluster 
yet", response); + listener.onResponse(response); + return; + } + final ProtobufTaskResult taskResult; + try { + taskResult = task.result(localNode, response); + } catch (IOException ex) { + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex); + listener.onFailure(ex); + return; + } + + taskResultsService.storeResult(taskResult, new ActionListener() { + @Override + public void onResponse(Void aVoid) { + listener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), e); + listener.onFailure(e); + } + }); + } + + /** + * Returns the list of currently running tasks on the node + */ + public Map getTasks() { + HashMap taskHashMap = new HashMap<>(this.tasks); + for (CancellableTaskHolder holder : cancellableTasks.values()) { + taskHashMap.put(holder.getTask().getId(), holder.getTask()); + } + return Collections.unmodifiableMap(taskHashMap); + } + + /** + * Returns the list of currently running tasks on the node that can be cancelled + */ + public Map getCancellableTasks() { + HashMap taskHashMap = new HashMap<>(); + for (CancellableTaskHolder holder : cancellableTasks.values()) { + taskHashMap.put(holder.getTask().getId(), holder.getTask()); + } + return Collections.unmodifiableMap(taskHashMap); + } + + /** + * Returns a task with given id, or null if the task is not found. + */ + public ProtobufTask getTask(long id) { + ProtobufTask task = tasks.get(id); + if (task != null) { + return task; + } else { + return getCancellableTask(id); + } + } + + /** + * Returns a cancellable task with given id, or null if the task is not found. + */ + public ProtobufCancellableTask getCancellableTask(long id) { + CancellableTaskHolder holder = cancellableTasks.get(id); + if (holder != null) { + return holder.getTask(); + } else { + return null; + } + } + + /** + * Returns the number of currently banned tasks. + *

+ * Will be used in task manager stats and for debugging. + */ + public int getBanCount() { + return banedParents.size(); + } + + /** + * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. + *
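+     * While a ban is in place, {@code registerCancellableTask} (above) rejects any newly arriving child
+     * task of the banned parent, so no new work can start behind the cancellation.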

+ * This method is called when a parent task that has children is cancelled. + * + * @return a list of pending cancellable child tasks + */ + public List setBan(ProtobufTaskId parentTaskId, String reason) { + logger.trace("setting ban for the parent task {} {}", parentTaskId, reason); + + // Set the ban first, so the newly created tasks cannot be registered + synchronized (banedParents) { + if (lastDiscoveryNodes.nodeExists(parentTaskId.getNodeId())) { + // Only set the ban if the node is the part of the cluster + banedParents.put(parentTaskId, reason); + } + } + return cancellableTasks.values().stream().filter(t -> t.hasParent(parentTaskId)).map(t -> t.task).collect(Collectors.toList()); + } + + /** + * Removes the ban for the specified parent task. + *

+ * This method is called when a previously banned task finally cancelled + */ + public void removeBan(ProtobufTaskId parentTaskId) { + logger.trace("removing ban for the parent task {}", parentTaskId); + banedParents.remove(parentTaskId); + } + + // for testing + public Set getBannedTaskIds() { + return Collections.unmodifiableSet(banedParents.keySet()); + } + + /** + * Start rejecting new child requests as the parent task was cancelled. + * + * @param taskId the parent task id + * @param onChildTasksCompleted called when all child tasks are completed or failed + * @return the set of current nodes that have outstanding child tasks + */ + public Collection startBanOnChildrenNodes(long taskId, Runnable onChildTasksCompleted) { + final CancellableTaskHolder holder = cancellableTasks.get(taskId); + if (holder != null) { + return holder.startBan(onChildTasksCompleted); + } else { + onChildTasksCompleted.run(); + return Collections.emptySet(); + } + } + + @Override + public void applyClusterState(ProtobufClusterChangedEvent event) { + lastDiscoveryNodes = event.state().getNodes(); + if (event.nodesRemoved()) { + synchronized (banedParents) { + lastDiscoveryNodes = event.state().getNodes(); + // Remove all bans that were registered by nodes that are no longer in the cluster state + Iterator banIterator = banedParents.keySet().iterator(); + while (banIterator.hasNext()) { + ProtobufTaskId taskId = banIterator.next(); + if (lastDiscoveryNodes.nodeExists(taskId.getNodeId()) == false) { + logger.debug( + "Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", + taskId, + event.state().getNodes().getLocalNode() + ); + banIterator.remove(); + } + } + } + } + } + + /** + * Blocks the calling thread, waiting for the task to vanish from the ProtobufTaskManager. + */ + public void waitForTaskCompletion(ProtobufTask task, long untilInNanos) { + while (System.nanoTime() - untilInNanos < 0) { + if (getTask(task.getId()) == null) { + return; + } + try { + Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); + } catch (InterruptedException e) { + throw new OpenSearchException("Interrupted waiting for completion of [{}]", e, task); + } + } + throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); + } + + /** + * Takes actions when a task is registered and its execution starts + * + * @param task getting executed. 
+ * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns + */ + public ThreadContext.StoredContext taskExecutionStarted(ProtobufTask task) { + if (taskResourceTrackingService.get() == null) return () -> {}; + + return taskResourceTrackingService.get().startTracking(task); + } + + private static class CancellableTaskHolder { + private final ProtobufCancellableTask task; + private boolean finished = false; + private List cancellationListeners = null; + private ObjectIntMap childTasksPerNode = null; + private boolean banChildren = false; + private List childTaskCompletedListeners = null; + + CancellableTaskHolder(ProtobufCancellableTask task) { + this.task = task; + } + + void cancel(String reason, Runnable listener) { + final Runnable toRun; + synchronized (this) { + if (finished) { + assert cancellationListeners == null; + toRun = listener; + } else { + toRun = () -> {}; + if (listener != null) { + if (cancellationListeners == null) { + cancellationListeners = new ArrayList<>(); + } + cancellationListeners.add(listener); + } + } + } + try { + task.cancel(reason); + } finally { + if (toRun != null) { + toRun.run(); + } + } + } + + void cancel(String reason) { + task.cancel(reason); + } + + /** + * Marks task as finished. + */ + public void finish() { + final List listeners; + synchronized (this) { + this.finished = true; + if (cancellationListeners != null) { + listeners = cancellationListeners; + cancellationListeners = null; + } else { + listeners = Collections.emptyList(); + } + } + // We need to call the listener outside of the synchronised section to avoid potential bottle necks + // in the listener synchronization + notifyListeners(listeners); + } + + private void notifyListeners(List listeners) { + assert Thread.holdsLock(this) == false; + Exception rootException = null; + for (Runnable listener : listeners) { + try { + listener.run(); + } catch (RuntimeException inner) { + rootException = ExceptionsHelper.useOrSuppress(rootException, inner); + } + } + ExceptionsHelper.reThrowIfNotNull(rootException); + } + + public boolean hasParent(ProtobufTaskId parentTaskId) { + return task.getParentTaskId().equals(parentTaskId); + } + + public ProtobufCancellableTask getTask() { + return task; + } + + synchronized void registerChildNode(ProtobufDiscoveryNode node) { + if (banChildren) { + throw new TaskCancelledException("The parent task was cancelled, shouldn't start any child tasks"); + } + if (childTasksPerNode == null) { + childTasksPerNode = new ObjectIntHashMap<>(); + } + childTasksPerNode.addTo(node, 1); + } + + void unregisterChildNode(ProtobufDiscoveryNode node) { + final List listeners; + synchronized (this) { + if (childTasksPerNode.addTo(node, -1) == 0) { + childTasksPerNode.remove(node); + } + if (childTasksPerNode.isEmpty() && this.childTaskCompletedListeners != null) { + listeners = childTaskCompletedListeners; + childTaskCompletedListeners = null; + } else { + listeners = Collections.emptyList(); + } + } + notifyListeners(listeners); + } + + Set startBan(Runnable onChildTasksCompleted) { + final Set pendingChildNodes; + final Runnable toRun; + synchronized (this) { + banChildren = true; + if (childTasksPerNode == null) { + pendingChildNodes = Collections.emptySet(); + } else { + pendingChildNodes = StreamSupport.stream(childTasksPerNode.spliterator(), false) + .map(e -> e.key) + .collect(Collectors.toSet()); + } + if (pendingChildNodes.isEmpty()) { + assert childTaskCompletedListeners == null; + toRun = onChildTasksCompleted; + } 
else { + toRun = () -> {}; + if (childTaskCompletedListeners == null) { + childTaskCompletedListeners = new ArrayList<>(); + } + childTaskCompletedListeners.add(onChildTasksCompleted); + } + } + toRun.run(); + return pendingChildNodes; + } + } + + /** + * Start tracking a cancellable task with its tcp channel, so if the channel gets closed we can get a set of + * pending tasks associated that channel and cancel them as these results won't be retrieved by the parent task. + * + * @return a releasable that should be called when this pending task is completed + */ + public Releasable startTrackingCancellableChannelTask(TcpChannel channel, ProtobufCancellableTask task) { + assert cancellableTasks.containsKey(task.getId()) : "task [" + task.getId() + "] is not registered yet"; + final ChannelPendingTaskTracker tracker = channelPendingTaskTrackers.compute(channel, (k, curr) -> { + if (curr == null) { + curr = new ChannelPendingTaskTracker(); + } + curr.addTask(task); + return curr; + }); + if (tracker.registered.compareAndSet(false, true)) { + channel.addCloseListener(ActionListener.wrap(r -> { + final ChannelPendingTaskTracker removedTracker = channelPendingTaskTrackers.remove(channel); + assert removedTracker == tracker; + cancelTasksOnChannelClosed(tracker.drainTasks()); + }, e -> { assert false : new AssertionError("must not be here", e); })); + } + return () -> tracker.removeTask(task); + } + + // for testing + final int numberOfChannelPendingTaskTrackers() { + return channelPendingTaskTrackers.size(); + } + + private static class ChannelPendingTaskTracker { + final AtomicBoolean registered = new AtomicBoolean(); + final Semaphore permits = Assertions.ENABLED ? new Semaphore(Integer.MAX_VALUE) : null; + final Set pendingTasks = ConcurrentCollections.newConcurrentSet(); + + void addTask(ProtobufCancellableTask task) { + assert permits.tryAcquire() : "tracker was drained"; + final boolean added = pendingTasks.add(task); + assert added : "task " + task.getId() + " is in the pending list already"; + assert releasePermit(); + } + + boolean acquireAllPermits() { + permits.acquireUninterruptibly(Integer.MAX_VALUE); + return true; + } + + boolean releasePermit() { + permits.release(); + return true; + } + + Set drainTasks() { + assert acquireAllPermits(); // do not release permits so we can't add tasks to this tracker after draining + return Collections.unmodifiableSet(pendingTasks); + } + + void removeTask(ProtobufCancellableTask task) { + final boolean removed = pendingTasks.remove(task); + assert removed : "task " + task.getId() + " is not in the pending list"; + } + } + + private void cancelTasksOnChannelClosed(Set tasks) { + if (tasks.isEmpty() == false) { + threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn("failed to cancel tasks on channel closed", e); + } + + @Override + protected void doRun() { + for (ProtobufCancellableTask task : tasks) { + cancelTaskAndDescendants(task, "channel was closed", false, ActionListener.wrap(() -> {})); + } + } + }); + } + } + + public void cancelTaskAndDescendants(ProtobufCancellableTask task, String reason, boolean waitForCompletion, ActionListener listener) { + final TaskCancellationService service = cancellationService.get(); + if (service != null) { + service.cancelTaskAndDescendants(task, reason, waitForCompletion, listener); + } else { + assert false : "TaskCancellationService is not initialized"; + throw new IllegalStateException("TaskCancellationService is not initialized"); + } + } 
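+
+    /*
+     * A sketch of the intended lifecycle, with hypothetical wiring (the settings, cluster settings and
+     * thread pool instances are assumed to exist in the caller):
+     *
+     *   ProtobufTaskManager taskManager = ProtobufTaskManager.createTaskManagerWithClusterSettings(
+     *       settings, clusterSettings, protobufThreadPool, Set.of(Task.X_OPAQUE_ID));
+     *   ProtobufTask task = taskManager.register("transport", "cluster:monitor/nodes/info", request);
+     *   try {
+     *       // ... run the action ...
+     *   } finally {
+     *       taskManager.unregister(task);
+     *   }
+     */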
+} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java new file mode 100644 index 0000000000000..8b67855d2fc98 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java @@ -0,0 +1,270 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +import com.sun.management.ThreadMXBean; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.ProtobufThreadPool; + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; + +/** + * Service that helps track resource usage of tasks running on a node. +*/ +@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") +public class ProtobufTaskResourceTrackingService implements RunnableTaskExecutionListener { + + private static final Logger logger = LogManager.getLogger(TaskManager.class); + + public static final Setting TASK_RESOURCE_TRACKING_ENABLED = Setting.boolSetting( + "task_resource_tracking.enabled", + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final String TASK_ID = "TASK_ID"; + + private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + + private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final List taskCompletionListeners = new ArrayList<>(); + private final ProtobufThreadPool threadPool; + private volatile boolean taskResourceTrackingEnabled; + + @Inject + public ProtobufTaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ProtobufThreadPool threadPool) { + this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings); + this.threadPool = threadPool; + clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled); + } + + public void setTaskResourceTrackingEnabled(boolean taskResourceTrackingEnabled) { + this.taskResourceTrackingEnabled = taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingEnabled() { + return taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingSupported() { + return threadMXBean.isThreadAllocatedMemorySupported() && threadMXBean.isThreadAllocatedMemoryEnabled(); + } + + /** + * Executes logic only if task supports resource tracking and resource tracking setting is enabled. + *
<p>
+ * 1. Starts tracking the task in map of resourceAwareTasks. + * 2. Adds ProtobufTask Id in thread context to make sure it's available while task is processed across multiple threads. + * + * @param task for which resources needs to be tracked + * @return Autocloseable stored context to restore ThreadContext to the state before this method changed it. + */ + public ThreadContext.StoredContext startTracking(ProtobufTask task) { + if (task.supportsResourceTracking() == false + || isTaskResourceTrackingEnabled() == false + || isTaskResourceTrackingSupported() == false) { + return () -> {}; + } + + logger.debug("Starting resource tracking for task: {}", task.getId()); + resourceAwareTasks.put(task.getId(), task); + return addTaskIdToThreadContext(task); + } + + /** + * Stops tracking task registered earlier for tracking. + *
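+ * A typical pairing with {@code startTracking} (a sketch; {@code service} and {@code task}
+ * are illustrative names): restore the stored thread context and unregister the task once
+ * the request logic finishes:
+ * <pre>
+ * try (ThreadContext.StoredContext ignored = service.startTracking(task)) {
+ *     // execute the request logic
+ * } finally {
+ *     service.stopTracking(task);
+ * }
+ * </pre>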
<p>
+ * It deliberately skips the feature-enabled check, to avoid any issues if the setting was disabled while the task was in progress. + *
<p>
+ * It's also responsible to stop tracking the current thread's resources against this task if not already done. + * This happens when the thread executing the request logic itself calls the unregister method. So in this case unregister + * happens before runnable finishes. + * + * @param task task which has finished and doesn't need resource tracking. + */ + public void stopTracking(ProtobufTask task) { + logger.debug("Stopping resource tracking for task: {}", task.getId()); + try { + if (isCurrentThreadWorkingOnTask(task)) { + taskExecutionFinishedOnThread(task.getId(), Thread.currentThread().getId()); + } + } catch (Exception e) { + logger.warn("Failed while trying to mark the task execution on current thread completed.", e); + assert false; + } finally { + resourceAwareTasks.remove(task.getId()); + } + + List exceptions = new ArrayList<>(); + for (TaskCompletionListener listener : taskCompletionListeners) { + try { + listener.onTaskCompleted(task); + } catch (Exception e) { + exceptions.add(e); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); + } + + /** + * Refreshes the resource stats for the tasks provided by looking into which threads are actively working on these + * and how much resources these have consumed till now. + * + * @param tasks for which resource stats needs to be refreshed. + */ + public void refreshResourceStats(ProtobufTask... tasks) { + if (isTaskResourceTrackingEnabled() == false || isTaskResourceTrackingSupported() == false) { + return; + } + + for (ProtobufTask task : tasks) { + if (task.supportsResourceTracking() && resourceAwareTasks.containsKey(task.getId())) { + refreshResourceStats(task); + } + } + } + + private void refreshResourceStats(ProtobufTask resourceAwareTask) { + try { + logger.debug("Refreshing resource stats for ProtobufTask: {}", resourceAwareTask.getId()); + List threadsWorkingOnTask = getThreadsWorkingOnTask(resourceAwareTask); + threadsWorkingOnTask.forEach( + threadId -> resourceAwareTask.updateThreadResourceStats(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)) + ); + } catch (IllegalStateException e) { + logger.debug("Resource stats already updated."); + } + + } + + /** + * Called when a thread starts working on a task's runnable. + * + * @param taskId of the task for which runnable is starting + * @param threadId of the thread which will be executing the runnable and we need to check resource usage for this + * thread + */ + @Override + public void taskExecutionStartedOnThread(long taskId, long threadId) { + try { + final ProtobufTask task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("ProtobufTask execution started on thread. ProtobufTask: {}, Thread: {}", taskId, threadId); + task.startThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution started for task: [{}]", taskId), e); + assert false; + } + + } + + /** + * Called when a thread finishes working on a task's runnable. + * + * @param taskId of the task for which runnable is complete + * @param threadId of the thread which executed the runnable and we need to check resource usage for this thread + */ + @Override + public void taskExecutionFinishedOnThread(long taskId, long threadId) { + try { + final ProtobufTask task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("ProtobufTask execution finished on thread. 
ProtobufTask: {}, Thread: {}", taskId, threadId); + task.stopThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution finished for task: [{}]", taskId), e); + assert false; + } + } + + public Map getResourceAwareTasks() { + return Collections.unmodifiableMap(resourceAwareTasks); + } + + private ResourceUsageMetric[] getResourceUsageMetricsForThread(long threadId) { + ResourceUsageMetric currentMemoryUsage = new ResourceUsageMetric( + ResourceStats.MEMORY, + threadMXBean.getThreadAllocatedBytes(threadId) + ); + ResourceUsageMetric currentCPUUsage = new ResourceUsageMetric(ResourceStats.CPU, threadMXBean.getThreadCpuTime(threadId)); + return new ResourceUsageMetric[] { currentMemoryUsage, currentCPUUsage }; + } + + private boolean isCurrentThreadWorkingOnTask(ProtobufTask task) { + long threadId = Thread.currentThread().getId(); + List threadResourceInfos = task.getResourceStats().getOrDefault(threadId, Collections.emptyList()); + + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + return true; + } + } + return false; + } + + private List getThreadsWorkingOnTask(ProtobufTask task) { + List activeThreads = new ArrayList<>(); + for (List threadResourceInfos : task.getResourceStats().values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + activeThreads.add(threadResourceInfo.getThreadId()); + } + } + } + return activeThreads; + } + + /** + * Adds ProtobufTask Id in the ThreadContext. + *
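+ * For example (sketch), a downstream component can recover the id on any thread that carries
+ * the context:
+ * <pre>
+ * Long taskId = threadPool.getThreadContext().getTransient(ProtobufTaskResourceTrackingService.TASK_ID);
+ * </pre>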
<p>
+ * Stashes the existing ThreadContext and preserves all the existing ThreadContext's data in the new ThreadContext + * as well. + * + * @param task for which ProtobufTask Id needs to be added in ThreadContext. + * @return StoredContext reference to restore the ThreadContext from which we created a new one. + * Caller can call context.restore() to get the existing ThreadContext back. + */ + private ThreadContext.StoredContext addTaskIdToThreadContext(ProtobufTask task) { + ThreadContext threadContext = threadPool.getThreadContext(); + ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true, Collections.singletonList(TASK_ID)); + threadContext.putTransient(TASK_ID, task.getId()); + return storedContext; + } + + /** + * Listener that gets invoked when a task execution completes. + */ + public interface TaskCompletionListener { + void onTaskCompleted(ProtobufTask task); + } + + public void addTaskCompletionListener(TaskCompletionListener listener) { + this.taskCompletionListeners.add(listener); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java index 01be4eaaaf732..26cebbea22a6e 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java @@ -154,6 +154,53 @@ public void onFailure(Exception e) { } } + public void storeResult(ProtobufTaskResult taskResult, ActionListener listener) { + + ClusterState state = clusterService.state(); + + if (state.routingTable().hasIndex(TASK_INDEX) == false) { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(); + createIndexRequest.settings(taskResultIndexSettings()); + createIndexRequest.index(TASK_INDEX); + createIndexRequest.mapping(taskResultIndexMapping()); + createIndexRequest.cause("auto(task api)"); + + client.admin().indices().create(createIndexRequest, new ActionListener() { + @Override + public void onResponse(CreateIndexResponse result) { + doStoreResult(taskResult, listener); + } + + @Override + public void onFailure(Exception e) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) { + // we have the index, do it + try { + doStoreResult(taskResult, listener); + } catch (Exception inner) { + inner.addSuppressed(e); + listener.onFailure(inner); + } + } else { + listener.onFailure(e); + } + } + }); + } else { + IndexMetadata metadata = state.getMetadata().index(TASK_INDEX); + if (getTaskResultMappingVersion(metadata) < TASK_RESULT_MAPPING_VERSION) { + // The index already exists but doesn't have our mapping + client.admin() + .indices() + .preparePutMapping(TASK_INDEX) + .setSource(taskResultIndexMapping(), XContentType.JSON) + .execute(ActionListener.delegateFailure(listener, (l, r) -> doStoreResult(taskResult, listener))); + } else { + doStoreResult(taskResult, listener); + } + } + } + private int getTaskResultMappingVersion(IndexMetadata metadata) { MappingMetadata mappingMetadata = metadata.mapping(); if (mappingMetadata == null) { @@ -178,6 +225,17 @@ private void doStoreResult(TaskResult taskResult, ActionListener listener) doStoreResult(STORE_BACKOFF_POLICY.iterator(), index, listener); } + private void doStoreResult(ProtobufTaskResult taskResult, ActionListener listener) { + IndexRequestBuilder index = client.prepareIndex(TASK_INDEX).setId(taskResult.getTask().getTaskId().toString()); + try (XContentBuilder builder = XContentFactory.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { + 
taskResult.toXContent(builder, ToXContent.EMPTY_PARAMS); + index.setSource(builder); + } catch (IOException e) { + throw new OpenSearchException("Couldn't convert task result to XContent for [{}]", e, taskResult.getTask()); + } + doStoreResult(STORE_BACKOFF_POLICY.iterator(), index, listener); + } + private void doStoreResult(Iterator backoff, IndexRequestBuilder index, ActionListener listener) { index.execute(new ActionListener() { @Override diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java new file mode 100644 index 0000000000000..3276e5029e6f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java @@ -0,0 +1,91 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.threadpool; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.concurrent.ThreadContext; + +import java.util.List; + +/** + * Base class for executor builders. +* +* @param the underlying type of the executor settings +* +* @opensearch.internal +*/ +public abstract class ProtobufExecutorBuilder { + + private final String name; + + public ProtobufExecutorBuilder(String name) { + this.name = name; + } + + protected String name() { + return name; + } + + protected static String settingsKey(final String prefix, final String key) { + return String.join(".", prefix, key); + } + + protected int applyHardSizeLimit(final Settings settings, final String name) { + if (name.equals("bulk") || name.equals(ThreadPool.Names.WRITE) || name.equals(ThreadPool.Names.SYSTEM_WRITE)) { + return 1 + OpenSearchExecutors.allocatedProcessors(settings); + } else { + return Integer.MAX_VALUE; + } + } + + /** + * The list of settings this builder will register. + * + * @return the list of registered settings + */ + public abstract List> getRegisteredSettings(); + + /** + * Return an executor settings object from the node-level settings. + * + * @param settings the node-level settings + * @return the executor settings object + */ + abstract U getSettings(Settings settings); + + /** + * Builds the executor with the specified executor settings. + * + * @param settings the executor settings + * @param threadContext the current thread context + * @return a new executor built from the specified executor settings + */ + abstract ProtobufThreadPool.ExecutorHolder build(U settings, ThreadContext threadContext); + + /** + * Format the thread pool info object for this executor. 
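+ * For a fixed executor the formatted string typically looks like this (values are illustrative):
+ * <pre>
+ * name [write], size [8], queue size [10000]
+ * </pre>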
+ * + * @param info the thread pool info object to format + * @return a formatted thread pool info (useful for logging) + */ + abstract String formatInfo(ProtobufThreadPool.Info info); + + abstract static class ExecutorSettings { + + protected final String nodeName; + + ExecutorSettings(String nodeName) { + this.nodeName = nodeName; + } + + } + +} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java new file mode 100644 index 0000000000000..6d5bce32533ab --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java @@ -0,0 +1,182 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.threadpool; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ProtobufSizeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.node.Node; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; + +/** + * A builder for fixed executors. +* +* @opensearch.internal +*/ +public final class ProtobufFixedExecutorBuilder extends ProtobufExecutorBuilder { + + private final Setting sizeSetting; + private final Setting queueSizeSetting; + + /** + * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name. + * + * @param settings the node-level settings + * @param name the name of the executor + * @param size the fixed number of threads + * @param queueSize the size of the backing queue, -1 for unbounded + */ + ProtobufFixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) { + this(settings, name, size, queueSize, false); + } + + /** + * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name. + * + * @param settings the node-level settings + * @param name the name of the executor + * @param size the fixed number of threads + * @param queueSize the size of the backing queue, -1 for unbounded + * @param deprecated whether or not the thread pool is deprecated + */ + ProtobufFixedExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final boolean deprecated + ) { + this(settings, name, size, queueSize, "thread_pool." + name, deprecated); + } + + /** + * Construct a fixed executor builder. + * + * @param settings the node-level settings + * @param name the name of the executor + * @param size the fixed number of threads + * @param queueSize the size of the backing queue, -1 for unbounded + * @param prefix the prefix for the settings keys + */ + public ProtobufFixedExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final String prefix + ) { + this(settings, name, size, queueSize, prefix, false); + } + + /** + * Construct a fixed executor builder. 
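+ * For example (a sketch; the pool name and sizes are illustrative):
+ * <pre>
+ * ProtobufFixedExecutorBuilder builder =
+ *     new ProtobufFixedExecutorBuilder(settings, "my_pool", 4, 100, "thread_pool.my_pool", false);
+ * </pre>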
+ * + * @param settings the node-level settings + * @param name the name of the executor + * @param size the fixed number of threads + * @param queueSize the size of the backing queue, -1 for unbounded + * @param prefix the prefix for the settings keys + * @param deprecated whether or not the thread pool is deprecated + */ + public ProtobufFixedExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final String prefix, + final boolean deprecated + ) { + super(name); + final String sizeKey = settingsKey(prefix, "size"); + final Setting.Property[] properties; + if (deprecated) { + properties = new Setting.Property[] { Setting.Property.NodeScope, Setting.Property.Deprecated }; + } else { + properties = new Setting.Property[] { Setting.Property.NodeScope }; + } + this.sizeSetting = new Setting<>( + sizeKey, + s -> Integer.toString(size), + s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), + properties + ); + final String queueSizeKey = settingsKey(prefix, "queue_size"); + this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, properties); + } + + @Override + public List> getRegisteredSettings() { + return Arrays.asList(sizeSetting, queueSizeSetting); + } + + @Override + FixedExecutorSettings getSettings(Settings settings) { + final String nodeName = Node.NODE_NAME_SETTING.get(settings); + final int size = sizeSetting.get(settings); + final int queueSize = queueSizeSetting.get(settings); + return new FixedExecutorSettings(nodeName, size, queueSize); + } + + @Override + ProtobufThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) { + int size = settings.size; + int queueSize = settings.queueSize; + final ThreadFactory threadFactory = OpenSearchExecutors.daemonThreadFactory( + OpenSearchExecutors.threadName(settings.nodeName, name()) + ); + final ExecutorService executor = OpenSearchExecutors.newFixed( + settings.nodeName + "/" + name(), + size, + queueSize, + threadFactory, + threadContext + ); + final ProtobufThreadPool.Info info = new ProtobufThreadPool.Info( + name(), + ProtobufThreadPool.ThreadPoolType.FIXED, + size, + size, + null, + queueSize < 0 ? null : new ProtobufSizeValue(queueSize) + ); + return new ProtobufThreadPool.ExecutorHolder(executor, info); + } + + @Override + String formatInfo(ProtobufThreadPool.Info info) { + return String.format( + Locale.ROOT, + "name [%s], size [%d], queue size [%s]", + info.getName(), + info.getMax(), + info.getQueueSize() == null ? "unbounded" : info.getQueueSize() + ); + } + + static class FixedExecutorSettings extends ProtobufExecutorBuilder.ExecutorSettings { + + private final int size; + private final int queueSize; + + FixedExecutorSettings(final String nodeName, final int size, final int queueSize) { + super(nodeName); + this.size = size; + this.queueSize = queueSize; + } + + } + +} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java new file mode 100644 index 0000000000000..f95a13d2b6a82 --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java @@ -0,0 +1,134 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.threadpool; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ProtobufSizeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.node.Node; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicReference; + +/** + * A builder for resizable executors. +* +* @opensearch.internal +*/ +public final class ProtobufResizableExecutorBuilder extends ProtobufExecutorBuilder< + ProtobufResizableExecutorBuilder.ResizableExecutorSettings> { + + private final Setting sizeSetting; + private final Setting queueSizeSetting; + private final AtomicReference runnableTaskListener; + + ProtobufResizableExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final AtomicReference runnableTaskListener + ) { + this(settings, name, size, queueSize, "thread_pool." + name, runnableTaskListener); + } + + public ProtobufResizableExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final String prefix, + final AtomicReference runnableTaskListener + ) { + super(name); + final String sizeKey = settingsKey(prefix, "size"); + this.sizeSetting = new Setting<>( + sizeKey, + s -> Integer.toString(size), + s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), + Setting.Property.NodeScope + ); + final String queueSizeKey = settingsKey(prefix, "queue_size"); + this.queueSizeSetting = Setting.intSetting( + queueSizeKey, + queueSize, + new Setting.Property[] { Setting.Property.NodeScope, Setting.Property.Dynamic } + ); + this.runnableTaskListener = runnableTaskListener; + } + + @Override + public List> getRegisteredSettings() { + return Arrays.asList(sizeSetting, queueSizeSetting); + } + + @Override + ResizableExecutorSettings getSettings(Settings settings) { + final String nodeName = Node.NODE_NAME_SETTING.get(settings); + final int size = sizeSetting.get(settings); + final int queueSize = queueSizeSetting.get(settings); + return new ResizableExecutorSettings(nodeName, size, queueSize); + } + + @Override + ProtobufThreadPool.ExecutorHolder build(final ResizableExecutorSettings settings, final ThreadContext threadContext) { + int size = settings.size; + int queueSize = settings.queueSize; + final ThreadFactory threadFactory = OpenSearchExecutors.daemonThreadFactory( + OpenSearchExecutors.threadName(settings.nodeName, name()) + ); + final ExecutorService executor = OpenSearchExecutors.newResizable( + settings.nodeName + "/" + name(), + size, + queueSize, + threadFactory, + threadContext, + runnableTaskListener + ); + final ProtobufThreadPool.Info info = new ProtobufThreadPool.Info( + name(), + ProtobufThreadPool.ThreadPoolType.RESIZABLE, + size, + size, + null, + queueSize < 0 ? null : new ProtobufSizeValue(queueSize) + ); + return new ProtobufThreadPool.ExecutorHolder(executor, info); + } + + @Override + String formatInfo(ProtobufThreadPool.Info info) { + return String.format( + Locale.ROOT, + "name [%s], size [%d], queue size [%s]", + info.getName(), + info.getMax(), + info.getQueueSize() == null ? 
"unbounded" : info.getQueueSize() + ); + } + + static class ResizableExecutorSettings extends ProtobufExecutorBuilder.ExecutorSettings { + + private final int size; + private final int queueSize; + + ResizableExecutorSettings(final String nodeName, final int size, final int queueSize) { + super(nodeName); + this.size = size; + this.queueSize = queueSize; + } + + } +} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java new file mode 100644 index 0000000000000..583dfb4e785ba --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java @@ -0,0 +1,141 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.threadpool; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.node.Node; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +/** + * A builder for scaling executors. +* +* @opensearch.internal +*/ +public final class ProtobufScalingExecutorBuilder extends ProtobufExecutorBuilder { + + private final Setting coreSetting; + private final Setting maxSetting; + private final Setting keepAliveSetting; + + /** + * Construct a scaling executor builder; the settings will have the + * key prefix "thread_pool." followed by the executor name. + * + * @param name the name of the executor + * @param core the minimum number of threads in the pool + * @param max the maximum number of threads in the pool + * @param keepAlive the time that spare threads above {@code core} + * threads will be kept alive + */ + public ProtobufScalingExecutorBuilder(final String name, final int core, final int max, final TimeValue keepAlive) { + this(name, core, max, keepAlive, "thread_pool." + name); + } + + /** + * Construct a scaling executor builder; the settings will have the + * specified key prefix. 
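+ * For example (a sketch; the pool name and bounds are illustrative):
+ * <pre>
+ * ProtobufScalingExecutorBuilder builder =
+ *     new ProtobufScalingExecutorBuilder("my_pool", 1, 8, TimeValue.timeValueMinutes(5), "thread_pool.my_pool");
+ * </pre>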
+ * + * @param name the name of the executor + * @param core the minimum number of threads in the pool + * @param max the maximum number of threads in the pool + * @param keepAlive the time that spare threads above {@code core} + * threads will be kept alive + * @param prefix the prefix for the settings keys + */ + public ProtobufScalingExecutorBuilder( + final String name, + final int core, + final int max, + final TimeValue keepAlive, + final String prefix + ) { + super(name); + this.coreSetting = Setting.intSetting(settingsKey(prefix, "core"), core, Setting.Property.NodeScope); + this.maxSetting = Setting.intSetting(settingsKey(prefix, "max"), max, Setting.Property.NodeScope); + this.keepAliveSetting = Setting.timeSetting(settingsKey(prefix, "keep_alive"), keepAlive, Setting.Property.NodeScope); + } + + @Override + public List> getRegisteredSettings() { + return Arrays.asList(coreSetting, maxSetting, keepAliveSetting); + } + + @Override + ScalingExecutorSettings getSettings(Settings settings) { + final String nodeName = Node.NODE_NAME_SETTING.get(settings); + final int coreThreads = coreSetting.get(settings); + final int maxThreads = maxSetting.get(settings); + final TimeValue keepAlive = keepAliveSetting.get(settings); + return new ScalingExecutorSettings(nodeName, coreThreads, maxThreads, keepAlive); + } + + ProtobufThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final ThreadContext threadContext) { + TimeValue keepAlive = settings.keepAlive; + int core = settings.core; + int max = settings.max; + final ProtobufThreadPool.Info info = new ProtobufThreadPool.Info( + name(), + ProtobufThreadPool.ThreadPoolType.SCALING, + core, + max, + keepAlive, + null + ); + final ThreadFactory threadFactory = OpenSearchExecutors.daemonThreadFactory( + OpenSearchExecutors.threadName(settings.nodeName, name()) + ); + final ExecutorService executor = OpenSearchExecutors.newScaling( + settings.nodeName + "/" + name(), + core, + max, + keepAlive.millis(), + TimeUnit.MILLISECONDS, + threadFactory, + threadContext + ); + return new ProtobufThreadPool.ExecutorHolder(executor, info); + } + + @Override + String formatInfo(ProtobufThreadPool.Info info) { + return String.format( + Locale.ROOT, + "name [%s], core [%d], max [%d], keep alive [%s]", + info.getName(), + info.getMin(), + info.getMax(), + info.getKeepAlive() + ); + } + + static class ScalingExecutorSettings extends ProtobufExecutorBuilder.ExecutorSettings { + + private final int core; + private final int max; + private final TimeValue keepAlive; + + ScalingExecutorSettings(final String nodeName, final int core, final int max, final TimeValue keepAlive) { + super(nodeName); + this.core = core; + this.max = max; + this.keepAlive = keepAlive; + } + } + +} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java new file mode 100644 index 0000000000000..d48704fa5777a --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java @@ -0,0 +1,860 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.threadpool; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ProtobufSizeValue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.XRejectedExecutionHandler; +import org.opensearch.node.Node; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.RejectedExecutionHandler; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static java.util.Collections.unmodifiableMap; + +/** + * The OpenSearch threadpool class +* +* @opensearch.internal +*/ +public class ProtobufThreadPool implements ProtobufReportingService, Scheduler { + + private static final Logger logger = LogManager.getLogger(ThreadPool.class); + + /** + * The threadpool names. + * + * @opensearch.internal + */ + public static class Names { + public static final String SAME = "same"; + public static final String GENERIC = "generic"; + @Deprecated + public static final String LISTENER = "listener"; + public static final String GET = "get"; + public static final String ANALYZE = "analyze"; + public static final String WRITE = "write"; + public static final String SEARCH = "search"; + public static final String SEARCH_THROTTLED = "search_throttled"; + public static final String MANAGEMENT = "management"; + public static final String FLUSH = "flush"; + public static final String REFRESH = "refresh"; + public static final String WARMER = "warmer"; + public static final String SNAPSHOT = "snapshot"; + public static final String FORCE_MERGE = "force_merge"; + public static final String FETCH_SHARD_STARTED = "fetch_shard_started"; + public static final String FETCH_SHARD_STORE = "fetch_shard_store"; + public static final String SYSTEM_READ = "system_read"; + public static final String SYSTEM_WRITE = "system_write"; + public static final String TRANSLOG_TRANSFER = "translog_transfer"; + public static final String TRANSLOG_SYNC = "translog_sync"; + public static final String REMOTE_PURGE = "remote_purge"; + } + + /** + * The threadpool type. 
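+ * Each type is keyed by its settings string and can be resolved back (sketch):
+ * <pre>
+ * ProtobufThreadPool.ThreadPoolType type = ProtobufThreadPool.ThreadPoolType.fromType("fixed");
+ * // type == ThreadPoolType.FIXED; unknown strings throw IllegalArgumentException
+ * </pre>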
+ * + * @opensearch.internal + */ + public enum ThreadPoolType { + DIRECT("direct"), + FIXED("fixed"), + RESIZABLE("resizable"), + SCALING("scaling"); + + private final String type; + + public String getType() { + return type; + } + + ThreadPoolType(String type) { + this.type = type; + } + + private static final Map TYPE_MAP; + + static { + Map typeMap = new HashMap<>(); + for (ThreadPoolType threadPoolType : ThreadPoolType.values()) { + typeMap.put(threadPoolType.getType(), threadPoolType); + } + TYPE_MAP = Collections.unmodifiableMap(typeMap); + } + + public static ThreadPoolType fromType(String type) { + ThreadPoolType threadPoolType = TYPE_MAP.get(type); + if (threadPoolType == null) { + throw new IllegalArgumentException("no ThreadPoolType for " + type); + } + return threadPoolType; + } + } + + public static final Map THREAD_POOL_TYPES; + + static { + HashMap map = new HashMap<>(); + map.put(Names.SAME, ThreadPoolType.DIRECT); + map.put(Names.GENERIC, ThreadPoolType.SCALING); + map.put(Names.LISTENER, ThreadPoolType.FIXED); + map.put(Names.GET, ThreadPoolType.FIXED); + map.put(Names.ANALYZE, ThreadPoolType.FIXED); + map.put(Names.WRITE, ThreadPoolType.FIXED); + map.put(Names.SEARCH, ThreadPoolType.RESIZABLE); + map.put(Names.MANAGEMENT, ThreadPoolType.SCALING); + map.put(Names.FLUSH, ThreadPoolType.SCALING); + map.put(Names.REFRESH, ThreadPoolType.SCALING); + map.put(Names.WARMER, ThreadPoolType.SCALING); + map.put(Names.SNAPSHOT, ThreadPoolType.SCALING); + map.put(Names.FORCE_MERGE, ThreadPoolType.FIXED); + map.put(Names.FETCH_SHARD_STARTED, ThreadPoolType.SCALING); + map.put(Names.FETCH_SHARD_STORE, ThreadPoolType.SCALING); + map.put(Names.SEARCH_THROTTLED, ThreadPoolType.RESIZABLE); + map.put(Names.SYSTEM_READ, ThreadPoolType.FIXED); + map.put(Names.SYSTEM_WRITE, ThreadPoolType.FIXED); + map.put(Names.TRANSLOG_TRANSFER, ThreadPoolType.SCALING); + map.put(Names.TRANSLOG_SYNC, ThreadPoolType.FIXED); + map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); + THREAD_POOL_TYPES = Collections.unmodifiableMap(map); + } + + private final Map executors; + + private final ProtobufThreadPoolInfo threadPoolInfo; + + private final CachedTimeThread cachedTimeThread; + + static final ExecutorService DIRECT_EXECUTOR = OpenSearchExecutors.newDirectExecutorService(); + + private final ThreadContext threadContext; + + private final Map builders; + + private final ScheduledThreadPoolExecutor scheduler; + + public Collection builders() { + return Collections.unmodifiableCollection(builders.values()); + } + + public static Setting ESTIMATED_TIME_INTERVAL_SETTING = Setting.timeSetting( + "thread_pool.estimated_time_interval", + TimeValue.timeValueMillis(200), + TimeValue.ZERO, + Setting.Property.NodeScope + ); + + public ProtobufThreadPool(final Settings settings, final ProtobufExecutorBuilder... customBuilders) { + this(settings, null, customBuilders); + } + + public ProtobufThreadPool( + final Settings settings, + final AtomicReference runnableTaskListener, + final ProtobufExecutorBuilder... 
customBuilders + ) { + assert Node.NODE_NAME_SETTING.exists(settings); + + final Map builders = new HashMap<>(); + final int allocatedProcessors = OpenSearchExecutors.allocatedProcessors(settings); + final int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors); + final int halfProcMaxAt10 = halfAllocatedProcessorsMaxTen(allocatedProcessors); + final int genericThreadPoolMax = boundedBy(4 * allocatedProcessors, 128, 512); + builders.put( + Names.GENERIC, + new ProtobufScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30)) + ); + builders.put(Names.WRITE, new ProtobufFixedExecutorBuilder(settings, Names.WRITE, allocatedProcessors, 10000)); + builders.put(Names.GET, new ProtobufFixedExecutorBuilder(settings, Names.GET, allocatedProcessors, 1000)); + builders.put(Names.ANALYZE, new ProtobufFixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); + builders.put( + Names.SEARCH, + new ProtobufResizableExecutorBuilder( + settings, + Names.SEARCH, + searchThreadPoolSize(allocatedProcessors), + 1000, + runnableTaskListener + ) + ); + builders.put( + Names.SEARCH_THROTTLED, + new ProtobufResizableExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, runnableTaskListener) + ); + builders.put(Names.MANAGEMENT, new ProtobufScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); + // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded + // the assumption here is that the listeners should be very lightweight on the listeners side + builders.put(Names.LISTENER, new ProtobufFixedExecutorBuilder(settings, Names.LISTENER, halfProcMaxAt10, -1, true)); + builders.put(Names.FLUSH, new ProtobufScalingExecutorBuilder(Names.FLUSH, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + builders.put(Names.REFRESH, new ProtobufScalingExecutorBuilder(Names.REFRESH, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5))); + builders.put(Names.WARMER, new ProtobufScalingExecutorBuilder(Names.WARMER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + builders.put(Names.SNAPSHOT, new ProtobufScalingExecutorBuilder(Names.SNAPSHOT, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + builders.put( + Names.FETCH_SHARD_STARTED, + new ProtobufScalingExecutorBuilder(Names.FETCH_SHARD_STARTED, 1, 2 * allocatedProcessors, TimeValue.timeValueMinutes(5)) + ); + builders.put(Names.FORCE_MERGE, new ProtobufFixedExecutorBuilder(settings, Names.FORCE_MERGE, 1, -1)); + builders.put( + Names.FETCH_SHARD_STORE, + new ProtobufScalingExecutorBuilder(Names.FETCH_SHARD_STORE, 1, 2 * allocatedProcessors, TimeValue.timeValueMinutes(5)) + ); + builders.put(Names.SYSTEM_READ, new ProtobufFixedExecutorBuilder(settings, Names.SYSTEM_READ, halfProcMaxAt5, 2000, false)); + builders.put(Names.SYSTEM_WRITE, new ProtobufFixedExecutorBuilder(settings, Names.SYSTEM_WRITE, halfProcMaxAt5, 1000, false)); + builders.put( + Names.TRANSLOG_TRANSFER, + new ProtobufScalingExecutorBuilder(Names.TRANSLOG_TRANSFER, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) + ); + builders.put(Names.TRANSLOG_SYNC, new ProtobufFixedExecutorBuilder(settings, Names.TRANSLOG_SYNC, allocatedProcessors * 4, 10000)); + builders.put( + Names.REMOTE_PURGE, + new ProtobufScalingExecutorBuilder(Names.REMOTE_PURGE, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)) + ); + + for (final ProtobufExecutorBuilder builder : customBuilders) { + if (builders.containsKey(builder.name())) { + throw new IllegalArgumentException("builder with name [" + 
builder.name() + "] already exists"); + } + builders.put(builder.name(), builder); + } + this.builders = Collections.unmodifiableMap(builders); + + threadContext = new ThreadContext(settings); + + final Map executors = new HashMap<>(); + for (final Map.Entry entry : builders.entrySet()) { + final ProtobufExecutorBuilder.ExecutorSettings executorSettings = entry.getValue().getSettings(settings); + final ExecutorHolder executorHolder = entry.getValue().build(executorSettings, threadContext); + if (executors.containsKey(executorHolder.info.getName())) { + throw new IllegalStateException("duplicate executors with name [" + executorHolder.info.getName() + "] registered"); + } + logger.debug("created thread pool: {}", entry.getValue().formatInfo(executorHolder.info)); + executors.put(entry.getKey(), executorHolder); + } + + executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT))); + this.executors = unmodifiableMap(executors); + + final List infos = executors.values() + .stream() + .filter(holder -> holder.info.getName().equals("same") == false) + .map(holder -> holder.info) + .collect(Collectors.toList()); + this.threadPoolInfo = new ProtobufThreadPoolInfo(infos); + this.scheduler = Scheduler.initScheduler(settings); + TimeValue estimatedTimeInterval = ESTIMATED_TIME_INTERVAL_SETTING.get(settings); + this.cachedTimeThread = new CachedTimeThread(OpenSearchExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis()); + this.cachedTimeThread.start(); + } + + /** + * Returns a value of milliseconds that may be used for relative time calculations. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + public long relativeTimeInMillis() { + return TimeValue.nsecToMSec(relativeTimeInNanos()); + } + + /** + * Returns a value of nanoseconds that may be used for relative time calculations. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + public long relativeTimeInNanos() { + return cachedTimeThread.relativeTimeInNanos(); + } + + /** + * Returns the value of milliseconds since UNIX epoch. + * + * This method should only be used for exact date/time formatting. For calculating + * time deltas that should not suffer from negative deltas, which are possible with + * this method, see {@link #relativeTimeInMillis()}. 
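+ * Measuring a delta with the cached relative clock (sketch):
+ * <pre>
+ * long startNanos = threadPool.relativeTimeInNanos();
+ * // ... do work ...
+ * long tookMillis = TimeValue.nsecToMSec(threadPool.relativeTimeInNanos() - startNanos);
+ * </pre>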
+ */ + public long absoluteTimeInMillis() { + return cachedTimeThread.absoluteTimeInMillis(); + } + + @Override + public ProtobufThreadPoolInfo info() { + return threadPoolInfo; + } + + public Info info(String name) { + ExecutorHolder holder = executors.get(name); + if (holder == null) { + return null; + } + return holder.info; + } + + public ThreadPoolStats stats() { + List stats = new ArrayList<>(); + for (ExecutorHolder holder : executors.values()) { + final String name = holder.info.getName(); + // no need to have info on "same" thread pool + if ("same".equals(name)) { + continue; + } + int threads = -1; + int queue = -1; + int active = -1; + long rejected = -1; + int largest = -1; + long completed = -1; + if (holder.executor() instanceof ThreadPoolExecutor) { + ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) holder.executor(); + threads = threadPoolExecutor.getPoolSize(); + queue = threadPoolExecutor.getQueue().size(); + active = threadPoolExecutor.getActiveCount(); + largest = threadPoolExecutor.getLargestPoolSize(); + completed = threadPoolExecutor.getCompletedTaskCount(); + RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); + if (rejectedExecutionHandler instanceof XRejectedExecutionHandler) { + rejected = ((XRejectedExecutionHandler) rejectedExecutionHandler).rejected(); + } + } + stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed)); + } + return new ThreadPoolStats(stats); + } + + /** + * Get the generic {@link ExecutorService}. This executor service + * {@link Executor#execute(Runnable)} method will run the {@link Runnable} it is given in the + * {@link ThreadContext} of the thread that queues it. + *
<p>
+ * Warning: this {@linkplain ExecutorService} will not throw {@link RejectedExecutionException} + * if you submit a task while it is shut down. It will instead silently queue it and not run it. + */ + public ExecutorService generic() { + return executor(Names.GENERIC); + } + + /** + * Get the {@link ExecutorService} with the given name. This executor service's + * {@link Executor#execute(Runnable)} method will run the {@link Runnable} it is given in the + * {@link ThreadContext} of the thread that queues it. + *
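+ * For example (sketch):
+ * <pre>
+ * threadPool.executor(Names.GENERIC).execute(() -> {
+ *     // work that must not block a transport thread
+ * });
+ * </pre>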
<p>
+ * Warning: this {@linkplain ExecutorService} might not throw {@link RejectedExecutionException} + * if you submit a task while it is shut down. It will instead silently queue it and not run it. + * + * @param name the name of the executor service to obtain + * @throws IllegalArgumentException if no executor service with the specified name exists + */ + public ExecutorService executor(String name) { + final ExecutorHolder holder = executors.get(name); + if (holder == null) { + throw new IllegalArgumentException("no executor service found for [" + name + "]"); + } + return holder.executor(); + } + + /** + * Schedules a one-shot command to run after a given delay. The command is run in the context of the calling thread. + * + * @param command the command to run + * @param delay delay before the task executes + * @param executor the name of the thread pool on which to execute this task. SAME means "execute on the scheduler thread" which changes + * the meaning of the ScheduledFuture returned by this method. In that case the ScheduledFuture will complete only when the + * command completes. + * @return a ScheduledFuture whose get will return when the task has been added to its target thread pool, and throw an exception if + * the task is canceled before it was added to its target thread pool. Once the task has been added to its target thread pool + * the ScheduledFuture cannot interact with it. + * @throws OpenSearchRejectedExecutionException if the task cannot be scheduled for execution + */ + @Override + public ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor) { + command = threadContext.preserveContext(command); + if (!Names.SAME.equals(executor)) { + command = new ThreadedRunnable(command, executor(executor)); + } + return new ScheduledCancellableAdapter(scheduler.schedule(command, delay.millis(), TimeUnit.MILLISECONDS)); + } + + public void scheduleUnlessShuttingDown(TimeValue delay, String executor, Runnable command) { + try { + schedule(command, delay, executor); + } catch (OpenSearchRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.debug( + new ParameterizedMessage( + "could not schedule execution of [{}] after [{}] on [{}] as executor is shut down", + command, + delay, + executor + ), + e + ); + } else { + throw e; + } + } + } + + @Override + public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) { + return new ReschedulingRunnable(command, interval, executor, this, (e) -> { + if (logger.isDebugEnabled()) { + logger.debug(() -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", command, executor), e); + } + }, + (e) -> logger.warn( + () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", command, executor), + e + ) + ); + } + + protected final void stopCachedTimeThread() { + cachedTimeThread.running = false; + cachedTimeThread.interrupt(); + } + + public void shutdown() { + stopCachedTimeThread(); + scheduler.shutdown(); + for (ExecutorHolder executor : executors.values()) { + if (executor.executor() instanceof ThreadPoolExecutor) { + executor.executor().shutdown(); + } + } + } + + public void shutdownNow() { + stopCachedTimeThread(); + scheduler.shutdownNow(); + for (ExecutorHolder executor : executors.values()) { + if (executor.executor() instanceof ThreadPoolExecutor) { + executor.executor().shutdownNow(); + } + } + } + + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + boolean
result = scheduler.awaitTermination(timeout, unit); + for (ExecutorHolder executor : executors.values()) { + if (executor.executor() instanceof ThreadPoolExecutor) { + result &= executor.executor().awaitTermination(timeout, unit); + } + } + cachedTimeThread.join(unit.toMillis(timeout)); + return result; + } + + public ScheduledExecutorService scheduler() { + return this.scheduler; + } + + /** + * Constrains a value between minimum and maximum values + * (inclusive). + * + * @param value the value to constrain + * @param min the minimum acceptable value + * @param max the maximum acceptable value + * @return min if value is less than min, max if value is greater + * than value, otherwise value + */ + static int boundedBy(int value, int min, int max) { + return Math.min(max, Math.max(min, value)); + } + + static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { + return boundedBy((allocatedProcessors + 1) / 2, 1, 5); + } + + static int halfAllocatedProcessorsMaxTen(final int allocatedProcessors) { + return boundedBy((allocatedProcessors + 1) / 2, 1, 10); + } + + static int twiceAllocatedProcessors(final int allocatedProcessors) { + return boundedBy(2 * allocatedProcessors, 2, Integer.MAX_VALUE); + } + + public static int searchThreadPoolSize(final int allocatedProcessors) { + return ((allocatedProcessors * 3) / 2) + 1; + } + + class LoggingRunnable implements Runnable { + + private final Runnable runnable; + + LoggingRunnable(Runnable runnable) { + this.runnable = runnable; + } + + @Override + public void run() { + try { + runnable.run(); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); + throw e; + } + } + + @Override + public int hashCode() { + return runnable.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return runnable.equals(obj); + } + + @Override + public String toString() { + return "[threaded] " + runnable.toString(); + } + } + + class ThreadedRunnable implements Runnable { + + private final Runnable runnable; + + private final Executor executor; + + ThreadedRunnable(Runnable runnable, Executor executor) { + this.runnable = runnable; + this.executor = executor; + } + + @Override + public void run() { + try { + executor.execute(runnable); + } catch (OpenSearchRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.debug( + new ParameterizedMessage( + "could not schedule execution of [{}] on [{}] as executor is shut down", + runnable, + executor + ), + e + ); + } else { + throw e; + } + } + } + + @Override + public int hashCode() { + return runnable.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return runnable.equals(obj); + } + + @Override + public String toString() { + return "[threaded] " + runnable.toString(); + } + } + + /** + * A thread to cache millisecond time values from + * {@link System#nanoTime()} and {@link System#currentTimeMillis()}. + * + * The values are updated at a specified interval. + */ + static class CachedTimeThread extends Thread { + + final long interval; + volatile boolean running = true; + volatile long relativeNanos; + volatile long absoluteMillis; + + CachedTimeThread(String name, long interval) { + super(name); + this.interval = interval; + this.relativeNanos = System.nanoTime(); + this.absoluteMillis = System.currentTimeMillis(); + setDaemon(true); + } + + /** + * Return the current time used for relative calculations. This is {@link System#nanoTime()}. + *
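+ * As the following paragraph notes, an interval of 0 disables the cache entirely; a sketch
+ * of the corresponding setting (e.g. for tests):
+ * <pre>
+ * Settings settings = Settings.builder().put("thread_pool.estimated_time_interval", "0ms").build();
+ * </pre>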
<p>
+ * If {@link ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING} is set to 0 + * then the cache is disabled and the method calls {@link System#nanoTime()} + * whenever called. Typically used for testing. + */ + long relativeTimeInNanos() { + if (0 < interval) { + return relativeNanos; + } + return System.nanoTime(); + } + + /** + * Return the current epoch time, used to find absolute time. This is + * a cached version of {@link System#currentTimeMillis()}. + *
<p>
+ * If {@link ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING} is set to 0 + * then the cache is disabled and the method calls {@link System#currentTimeMillis()} + * whenever called. Typically used for testing. + */ + long absoluteTimeInMillis() { + if (0 < interval) { + return absoluteMillis; + } + return System.currentTimeMillis(); + } + + @Override + public void run() { + while (running && 0 < interval) { + relativeNanos = System.nanoTime(); + absoluteMillis = System.currentTimeMillis(); + try { + Thread.sleep(interval); + } catch (InterruptedException e) { + running = false; + return; + } + } + } + } + + static class ExecutorHolder { + private final ExecutorService executor; + public final Info info; + + ExecutorHolder(ExecutorService executor, Info info) { + assert executor instanceof OpenSearchThreadPoolExecutor || executor == DIRECT_EXECUTOR; + this.executor = executor; + this.info = info; + } + + ExecutorService executor() { + return executor; + } + } + + /** + * The thread pool information. + * + * @opensearch.internal + */ + public static class Info implements ProtobufWriteable { + + private final String name; + private final ThreadPoolType type; + private final int min; + private final int max; + private final TimeValue keepAlive; + private final ProtobufSizeValue queueSize; + + public Info(String name, ThreadPoolType type) { + this(name, type, -1); + } + + public Info(String name, ThreadPoolType type, int size) { + this(name, type, size, size, null, null); + } + + public Info( + String name, + ThreadPoolType type, + int min, + int max, + @Nullable TimeValue keepAlive, + @Nullable ProtobufSizeValue queueSize + ) { + this.name = name; + this.type = type; + this.min = min; + this.max = max; + this.keepAlive = keepAlive; + this.queueSize = queueSize; + } + + public Info(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + name = in.readString(); + final String typeStr = in.readString(); + // Opensearch on or after 3.0.0 version doesn't know about "fixed_auto_queue_size" thread pool. Convert it to RESIZABLE. + if (typeStr.equalsIgnoreCase("fixed_auto_queue_size")) { + type = ThreadPoolType.RESIZABLE; + } else { + type = ThreadPoolType.fromType(typeStr); + } + min = in.readInt32(); + max = in.readInt32(); + keepAlive = protobufStreamInput.readOptionalTimeValue(in); + queueSize = protobufStreamInput.readOptionalWriteable(ProtobufSizeValue::new, in); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + out.writeStringNoTag(name); + if (type == ThreadPoolType.RESIZABLE && protobufStreamOutput.getVersion().before(Version.V_3_0_0)) { + // Opensearch on older version doesn't know about "resizable" thread pool. 
Convert RESIZABLE to FIXED + // to avoid serialization/de-serization issue between nodes with different OpenSearch version + out.writeStringNoTag(ThreadPoolType.FIXED.getType()); + } else { + out.writeStringNoTag(type.getType()); + } + out.writeInt32NoTag(min); + out.writeInt32NoTag(max); + protobufStreamOutput.writeOptionalTimeValue(keepAlive, out); + protobufStreamOutput.writeOptionalWriteable(queueSize, out); + } + + public String getName() { + return this.name; + } + + public ThreadPoolType getThreadPoolType() { + return this.type; + } + + public int getMin() { + return this.min; + } + + public int getMax() { + return this.max; + } + + @Nullable + public TimeValue getKeepAlive() { + return this.keepAlive; + } + + @Nullable + public ProtobufSizeValue getQueueSize() { + return this.queueSize; + } + } + + /** + * Returns true if the given service was terminated successfully. If the termination timed out, + * the service is null this method will return false. + */ + public static boolean terminate(ExecutorService service, long timeout, TimeUnit timeUnit) { + if (service != null) { + service.shutdown(); + if (awaitTermination(service, timeout, timeUnit)) return true; + service.shutdownNow(); + return awaitTermination(service, timeout, timeUnit); + } + return false; + } + + private static boolean awaitTermination(final ExecutorService service, final long timeout, final TimeUnit timeUnit) { + try { + if (service.awaitTermination(timeout, timeUnit)) { + return true; + } + } catch (final InterruptedException e) { + Thread.currentThread().interrupt(); + } + return false; + } + + /** + * Returns true if the given pool was terminated successfully. If the termination timed out, + * the service is null this method will return false. + */ + public static boolean terminate(ThreadPool pool, long timeout, TimeUnit timeUnit) { + if (pool != null) { + // Leverage try-with-resources to close the threadpool + pool.shutdown(); + if (awaitTermination(pool, timeout, timeUnit)) { + return true; + } + // last resort + pool.shutdownNow(); + return awaitTermination(pool, timeout, timeUnit); + } + return false; + } + + private static boolean awaitTermination(final ThreadPool threadPool, final long timeout, final TimeUnit timeUnit) { + try { + if (threadPool.awaitTermination(timeout, timeUnit)) { + return true; + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + return false; + } + + public ThreadContext getThreadContext() { + return threadContext; + } + + public static boolean assertNotScheduleThread(String reason) { + assert Thread.currentThread().getName().contains("scheduler") == false : "Expected current thread [" + + Thread.currentThread() + + "] to not be the scheduler thread. 
Reason: [" + + reason + + "]"; + return true; + } + + public static boolean assertCurrentMethodIsNotCalledRecursively() { + final StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); + assert stackTraceElements.length >= 3 : stackTraceElements.length; + assert stackTraceElements[0].getMethodName().equals("getStackTrace") : stackTraceElements[0]; + assert stackTraceElements[1].getMethodName().equals("assertCurrentMethodIsNotCalledRecursively") : stackTraceElements[1]; + final StackTraceElement testingMethod = stackTraceElements[2]; + for (int i = 3; i < stackTraceElements.length; i++) { + assert stackTraceElements[i].getClassName().equals(testingMethod.getClassName()) == false + || stackTraceElements[i].getMethodName().equals(testingMethod.getMethodName()) == false : testingMethod.getClassName() + + "#" + + testingMethod.getMethodName() + + " is called recursively"; + } + return true; + } +} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java new file mode 100644 index 0000000000000..5188abedd67ca --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java @@ -0,0 +1,51 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.threadpool; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +/** + * Information about a threadpool +* +* @opensearch.internal +*/ +public class ProtobufThreadPoolInfo implements ProtobufReportingService.ProtobufInfo, Iterable { + + private final List infos; + + public ProtobufThreadPoolInfo(List infos) { + this.infos = Collections.unmodifiableList(infos); + } + + public ProtobufThreadPoolInfo(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + this.infos = Collections.unmodifiableList(protobufStreamInput.readList(ProtobufThreadPool.Info::new, in)); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + protobufStreamOutput.writeCollection(infos, (o, v) -> v.writeTo(o), out); + } + + @Override + public Iterator iterator() { + return infos.iterator(); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java b/server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java new file mode 100644 index 0000000000000..89fa327ecc0dd --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java @@ -0,0 +1,62 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.transport; + +import org.opensearch.Version; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.util.concurrent.ThreadContext; + +/** + * Represents a transport message sent over the network. Subclasses implement serialization and +* deserialization. +* +* @opensearch.internal +*/ +public abstract class ProtobufNetworkMessage { + + protected final Version version; + protected final ProtobufWriteable threadContext; + protected final long requestId; + protected final byte status; + + ProtobufNetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId) { + this.threadContext = (ProtobufWriteable) threadContext.captureAsWriteable(); + this.version = version; + this.requestId = requestId; + this.status = status; + } + + public Version getVersion() { + return version; + } + + public long getRequestId() { + return requestId; + } + + boolean isCompress() { + return TransportStatus.isCompress(status); + } + + boolean isResponse() { + return TransportStatus.isRequest(status) == false; + } + + boolean isRequest() { + return TransportStatus.isRequest(status); + } + + boolean isHandshake() { + return TransportStatus.isHandshake(status); + } + + boolean isError() { + return TransportStatus.isError(status); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java b/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java new file mode 100644 index 0000000000000..0bfc14e93d7da --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java @@ -0,0 +1,191 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedOutputStream; +import org.opensearch.Version; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.bytes.CompositeBytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.util.concurrent.ThreadContext; + +import java.io.IOException; +import java.util.Set; + +/** + * Outbound data as a message +* +* @opensearch.internal +*/ +abstract class ProtobufOutboundMessage extends ProtobufNetworkMessage { + + private final ProtobufWriteable message; + + ProtobufOutboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, ProtobufWriteable message) { + super(threadContext, version, status, requestId); + this.message = message; + } + + BytesReference serialize(CodedOutputStream bytesStream) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + protobufStreamOutput.setVersion(version); + // bytesStream.skip(TcpHeader.headerSize(version)); + + // The compressible bytes stream will not close the underlying bytes stream + BytesReference reference; + int variableHeaderLength = -1; + final long preHeaderPosition = bytesStream.position(); + writeVariableHeader(bytesStream); + variableHeaderLength = Math.toIntExact(bytesStream.position() - preHeaderPosition); + + try (CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bytesStream, TransportStatus.isCompress(status))) { + stream.setVersion(version); + stream.setFeatures(bytesStream.getFeatures()); + + if (variableHeaderLength == -1) { + writeVariableHeader(stream); + } + reference = writeMessage(stream); + } + + bytesStream.seek(0); + final int contentSize = reference.length() - TcpHeader.headerSize(version); + TcpHeader.writeHeader(bytesStream, requestId, status, version, contentSize, variableHeaderLength); + return reference; + } + + protected void writeVariableHeader(CodedOutputStream stream) throws IOException { + threadContext.writeTo(stream); + } + + protected BytesReference writeMessage(CompressibleBytesOutputStream stream) throws IOException { + final BytesReference zeroCopyBuffer; + if (message instanceof BytesTransportRequest) { + BytesTransportRequest bRequest = (BytesTransportRequest) message; + bRequest.writeThin(stream); + zeroCopyBuffer = bRequest.bytes; + } else if (message instanceof RemoteTransportException) { + stream.writeException((RemoteTransportException) message); + zeroCopyBuffer = BytesArray.EMPTY; + } else { + message.writeTo(stream); + zeroCopyBuffer = BytesArray.EMPTY; + } + // we have to call materializeBytes() here before accessing the bytes. A CompressibleBytesOutputStream + // might be implementing compression. And materializeBytes() ensures that some marker bytes (EOS marker) + // are written. Otherwise we barf on the decompressing end when we read past EOF on purpose in the + // #validateRequest method. this might be a problem in deflate after all but it's important to write + // the marker bytes. 
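+        // materializeBytes() also finishes the compression stream when compression is enabled,
+        // so everything written so far (variable header plus message body) becomes one
+        // BytesReference; a zero-copy payload, when present, is appended below via
+        // CompositeBytesReference without another copy.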
+ final BytesReference message = stream.materializeBytes(); + if (zeroCopyBuffer.length() == 0) { + return message; + } else { + return CompositeBytesReference.of(message, zeroCopyBuffer); + } + } + + /** + * Internal outbound message request + * + * @opensearch.internal + */ + static class Request extends ProtobufOutboundMessage { + + private final String[] features; + private final String action; + + Request( + ThreadContext threadContext, + String[] features, + ProtobufWriteable message, + Version version, + String action, + long requestId, + boolean isHandshake, + boolean compress + ) { + super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + this.features = features; + this.action = action; + } + + @Override + protected void writeVariableHeader(StreamOutput stream) throws IOException { + super.writeVariableHeader(stream); + stream.writeStringArray(features); + stream.writeString(action); + } + + private static byte setStatus(boolean compress, boolean isHandshake, ProtobufWriteable message) { + byte status = 0; + status = TransportStatus.setRequest(status); + if (compress && ProtobufOutboundMessage.canCompress(message)) { + status = TransportStatus.setCompress(status); + } + if (isHandshake) { + status = TransportStatus.setHandshake(status); + } + + return status; + } + } + + /** + * Internal message response + * + * @opensearch.internal + */ + static class Response extends ProtobufOutboundMessage { + + private final Set features; + + Response( + ThreadContext threadContext, + Set features, + ProtobufWriteable message, + Version version, + long requestId, + boolean isHandshake, + boolean compress + ) { + super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + this.features = features; + } + + @Override + protected void writeVariableHeader(StreamOutput stream) throws IOException { + super.writeVariableHeader(stream); + stream.setFeatures(features); + } + + private static byte setStatus(boolean compress, boolean isHandshake, ProtobufWriteable message) { + byte status = 0; + status = TransportStatus.setResponse(status); + if (message instanceof RemoteTransportException) { + status = TransportStatus.setError(status); + } + if (compress) { + status = TransportStatus.setCompress(status); + } + if (isHandshake) { + status = TransportStatus.setHandshake(status); + } + + return status; + } + } + + private static boolean canCompress(ProtobufWriteable message) { + return message instanceof BytesTransportRequest == false; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java new file mode 100644 index 0000000000000..1ddeb04f106a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java @@ -0,0 +1,125 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.transport;
+
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.lease.Releasables;
+import org.opensearch.search.internal.ShardSearchRequest;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.tasks.ProtobufCancellableTask;
+import org.opensearch.tasks.ProtobufTask;
+import org.opensearch.tasks.ProtobufTaskManager;
+
+import java.io.IOException;
+
+/**
+ * Registry for OpenSearch RequestHandlers
+*
+* @opensearch.internal
+*/
+public final class ProtobufRequestHandlerRegistry<Request extends ProtobufTransportRequest> {
+
+    private final String action;
+    private final ProtobufTransportRequestHandler<Request> handler;
+    private final boolean forceExecution;
+    private final boolean canTripCircuitBreaker;
+    private final String executor;
+    private final ProtobufTaskManager taskManager;
+    private final ProtobufWriteable.Reader<Request> requestReader;
+
+    public ProtobufRequestHandlerRegistry(
+        String action,
+        ProtobufWriteable.Reader<Request> requestReader,
+        ProtobufTaskManager taskManager,
+        ProtobufTransportRequestHandler<Request> handler,
+        String executor,
+        boolean forceExecution,
+        boolean canTripCircuitBreaker
+    ) {
+        this.action = action;
+        this.requestReader = requestReader;
+        this.handler = handler;
+        this.forceExecution = forceExecution;
+        this.canTripCircuitBreaker = canTripCircuitBreaker;
+        this.executor = executor;
+        this.taskManager = taskManager;
+    }
+
+    public String getAction() {
+        return action;
+    }
+
+    public Request newRequest(StreamInput in) throws IOException {
+        return requestReader.read(in);
+    }
+
+    public void processMessageReceived(Request request, TransportChannel channel) throws Exception {
+        final ProtobufTask task = taskManager.register(channel.getChannelType(), action, request);
+        ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task);
+
+        Releasable unregisterTask = () -> taskManager.unregister(task);
+        try {
+            if (channel instanceof TcpTransportChannel && task instanceof ProtobufCancellableTask) {
+                // if (request instanceof ShardSearchRequest) {
+                //     // on receiving request, update the inbound network time to reflect time spent in transit over the network
+                //     ((ShardSearchRequest) request).setInboundNetworkTime(
+                //         Math.max(0, System.currentTimeMillis() - ((ShardSearchRequest) request).getInboundNetworkTime())
+                //     );
+                // }
+                final TcpChannel tcpChannel = ((TcpTransportChannel) channel).getChannel();
+                final Releasable stopTracking = taskManager.startTrackingCancellableChannelTask(tcpChannel, (ProtobufCancellableTask) task);
+                unregisterTask = Releasables.wrap(unregisterTask, stopTracking);
+            }
+            final ProtobufTaskTransportChannel taskTransportChannel = new ProtobufTaskTransportChannel(channel, unregisterTask);
+            handler.messageReceived(request, taskTransportChannel, task);
+            unregisterTask = null;
+        } finally {
+            Releasables.close(unregisterTask);
+            contextToRestore.restore();
+        }
+    }
+
+    public boolean isForceExecution() {
+        return forceExecution;
+    }
+
+    public boolean canTripCircuitBreaker() {
+        return canTripCircuitBreaker;
+    }
+
+    public String getExecutor() {
+        return executor;
+    }
+
+    public ProtobufTransportRequestHandler<Request> getHandler() {
+        return handler;
+    }
+
+    @Override
+    public String toString() {
+        return handler.toString();
+    }
+
+    public static RequestHandlerRegistry replaceHandler(
+        RequestHandlerRegistry registry,
+        ProtobufTransportRequestHandler handler
+    ) {
+        return new 
RequestHandlerRegistry<>( + registry.action, + registry.requestReader, + registry.taskManager, + handler, + registry.executor, + registry.forceExecution, + registry.canTripCircuitBreaker + ); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTaskTransportChannel.java b/server/src/main/java/org/opensearch/transport/ProtobufTaskTransportChannel.java new file mode 100644 index 0000000000000..4e4851331de12 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTaskTransportChannel.java @@ -0,0 +1,67 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import org.opensearch.Version; +import org.opensearch.common.lease.Releasable; + +import java.io.IOException; + +/** + * Transport channel for tasks +* +* @opensearch.internal +*/ +public class ProtobufTaskTransportChannel implements ProtobufTransportChannel { + + private final ProtobufTransportChannel channel; + private final Releasable onTaskFinished; + + ProtobufTaskTransportChannel(ProtobufTransportChannel channel, Releasable onTaskFinished) { + this.channel = channel; + this.onTaskFinished = onTaskFinished; + } + + @Override + public String getProfileName() { + return channel.getProfileName(); + } + + @Override + public String getChannelType() { + return channel.getChannelType(); + } + + @Override + public void sendResponse(ProtobufTransportResponse response) throws IOException { + try { + onTaskFinished.close(); + } finally { + channel.sendResponse(response); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + try { + onTaskFinished.close(); + } finally { + channel.sendResponse(exception); + } + } + + @Override + public Version getVersion() { + return channel.getVersion(); + } + + public ProtobufTransportChannel getChannel() { + return channel; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java new file mode 100644 index 0000000000000..81aa03275d263 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java @@ -0,0 +1,112 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.transport; + +import org.opensearch.Version; +import org.opensearch.common.lease.Releasable; +import org.opensearch.search.query.QuerySearchResult; + +import java.io.IOException; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Channel for a TCP connection +* +* @opensearch.internal +*/ +public final class ProtobufTcpTransportChannel implements ProtobufTransportChannel { + + private final AtomicBoolean released = new AtomicBoolean(); + private final OutboundHandler outboundHandler; + private final TcpChannel channel; + private final String action; + private final long requestId; + private final Version version; + private final Set features; + private final boolean compressResponse; + private final boolean isHandshake; + private final Releasable breakerRelease; + + ProtobufTcpTransportChannel( + OutboundHandler outboundHandler, + TcpChannel channel, + String action, + long requestId, + Version version, + Set features, + boolean compressResponse, + boolean isHandshake, + Releasable breakerRelease + ) { + this.version = version; + this.features = features; + this.channel = channel; + this.outboundHandler = outboundHandler; + this.action = action; + this.requestId = requestId; + this.compressResponse = compressResponse; + this.isHandshake = isHandshake; + this.breakerRelease = breakerRelease; + } + + @Override + public String getProfileName() { + return channel.getProfile(); + } + + @Override + public void sendResponse(ProtobufTransportResponse response) throws IOException { + try { + // if (response instanceof QuerySearchResult && ((QuerySearchResult) response).getShardSearchRequest() != null) { + // // update outbound network time with current time before sending response over network + // ((QuerySearchResult) response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); + // } + outboundHandler.sendResponse(version, features, channel, requestId, action, response, compressResponse, isHandshake); + } finally { + release(false); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + try { + outboundHandler.sendErrorResponse(version, features, channel, requestId, action, exception); + } finally { + release(true); + } + } + + private Exception releaseBy; + + private void release(boolean isExceptionResponse) { + if (released.compareAndSet(false, true)) { + assert (releaseBy = new Exception()) != null; // easier to debug if it's already closed + breakerRelease.close(); + } else if (isExceptionResponse == false) { + // only fail if we are not sending an error - we might send the error triggered by the previous + // sendResponse call + throw new IllegalStateException("reserved bytes are already released", releaseBy); + } + } + + @Override + public String getChannelType() { + return "transport"; + } + + @Override + public Version getVersion() { + return version; + } + + public TcpChannel getChannel() { + return channel; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransport.java b/server/src/main/java/org/opensearch/transport/ProtobufTransport.java new file mode 100644 index 0000000000000..08d0ad17de49c --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransport.java @@ -0,0 +1,274 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.transport; + +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.component.LifecycleComponent; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; + +import java.io.Closeable; +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Predicate; + +/** + * OpenSearch ProtobufTransport Interface +* +* @opensearch.internal +*/ +public interface ProtobufTransport extends LifecycleComponent { + + /** + * Registers a new request handler + */ + default void registerRequestHandler(RequestHandlerRegistry reg) { + getRequestHandlers().registerHandler(reg); + } + + void setMessageListener(ProtobufTransportMessageListener listener); + + default void setSlowLogThreshold(TimeValue slowLogThreshold) {} + + default boolean isSecure() { + return false; + } + + /** + * The address the transport is bound on. + */ + ProtobufBoundTransportAddress boundAddress(); + + /** + * Further profile bound addresses + * @return null iff profiles are unsupported, otherwise a map with name of profile and its bound transport address + */ + Map profileBoundAddresses(); + + /** + * Returns an address from its string representation. + */ + TransportAddress[] addressesFromString(String address) throws UnknownHostException; + + /** + * Returns a list of all local addresses for this transport + */ + List getDefaultSeedAddresses(); + + /** + * Opens a new connection to the given node. When the connection is fully connected, the listener is called. + * The ActionListener will be called on the calling thread or the generic thread pool. + */ + void openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener listener); + + TransportStats getStats(); + + ResponseHandlers getResponseHandlers(); + + RequestHandlers getRequestHandlers(); + + /** + * A unidirectional connection to a {@link DiscoveryNode} + */ + interface Connection extends Closeable { + /** + * The node this connection is associated with + */ + DiscoveryNode getNode(); + + /** + * Sends the request to the node this connection is associated with + * @param requestId see {@link ResponseHandlers#add(ResponseContext)} for details + * @param action the action to execute + * @param request the request to send + * @param options request options to apply + * @throws NodeNotConnectedException if the given node is not connected + */ + void sendRequest(long requestId, String action, ProtobufTransportRequest request, TransportRequestOptions options) throws IOException, + TransportException; + + /** + * The listener's {@link ActionListener#onResponse(Object)} method will be called when this + * connection is closed. No implementations currently throw an exception during close, so + * {@link ActionListener#onFailure(Exception)} will not be called. + * + * @param listener to be called + */ + void addCloseListener(ActionListener listener); + + boolean isClosed(); + + /** + * Returns the version of the node this connection was established with. 
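+         * A typical use gates the wire format on the peer's version (a sketch):
+         * <pre>
+         * if (connection.getVersion().before(Version.CURRENT)) {
+         *     // fall back to a serialization the older node understands
+         * }
+         * </pre>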
+         */
+        default Version getVersion() {
+            return getNode().getVersion();
+        }
+
+        /**
+         * Returns a key that this connection can be cached on. Delegating subclasses must delegate method call to
+         * the original connection.
+         */
+        default Object getCacheKey() {
+            return this;
+        }
+
+        @Override
+        void close();
+    }
+
+    /**
+     * This class represents a response context that encapsulates the actual response handler, the action and the connection it was
+     * executed on.
+     */
+    final class ResponseContext<T extends TransportResponse> {
+
+        private final TransportResponseHandler<T> handler;
+
+        private final Connection connection;
+
+        private final String action;
+
+        ResponseContext(TransportResponseHandler<T> handler, Connection connection, String action) {
+            this.handler = handler;
+            this.connection = connection;
+            this.action = action;
+        }
+
+        public TransportResponseHandler<T> handler() {
+            return handler;
+        }
+
+        public Connection connection() {
+            return this.connection;
+        }
+
+        public String action() {
+            return this.action;
+        }
+    }
+
+    /**
+     * This class is a registry that maps in-flight request IDs to their response contexts.
+     */
+    final class ResponseHandlers {
+        private final ConcurrentMapLong<ResponseContext<? extends TransportResponse>> handlers = ConcurrentCollections
+            .newConcurrentMapLongWithAggressiveConcurrency();
+        private final AtomicLong requestIdGenerator = new AtomicLong();
+
+        /**
+         * Returns true if the given request ID has a context associated with it.
+         */
+        public boolean contains(long requestId) {
+            return handlers.containsKey(requestId);
+        }
+
+        /**
+         * Removes and returns the {@link ResponseContext} for the given request ID or returns
+         * null if no context is associated with this request ID.
+         */
+        public ResponseContext<? extends TransportResponse> remove(long requestId) {
+            return handlers.remove(requestId);
+        }
+
+        /**
+         * Adds a new response context and associates it with a new request ID.
+         * @return the new request ID
+         * @see Connection#sendRequest(long, String, ProtobufTransportRequest, TransportRequestOptions)
+         */
+        public long add(ResponseContext<? extends TransportResponse> holder) {
+            long requestId = newRequestId();
+            ResponseContext<? extends TransportResponse> existing = handlers.put(requestId, holder);
+            assert existing == null : "request ID already in use: " + requestId;
+            return requestId;
+        }
+
+        /**
+         * Returns a new request ID to use when sending a message via {@link Connection#sendRequest(long, String,
+         * ProtobufTransportRequest, TransportRequestOptions)}
+         */
+        long newRequestId() {
+            return requestIdGenerator.incrementAndGet();
+        }
+
+        /**
+         * Removes and returns all {@link ResponseContext} instances that match the predicate
+         */
+        public List<ResponseContext<? extends TransportResponse>> prune(Predicate<ResponseContext<? extends TransportResponse>> predicate) {
+            final List<ResponseContext<? extends TransportResponse>> holders = new ArrayList<>();
+            for (Map.Entry<Long, ResponseContext<? extends TransportResponse>> entry : handlers.entrySet()) {
+                ResponseContext<? extends TransportResponse> holder = entry.getValue();
+                if (predicate.test(holder)) {
+                    ResponseContext<? extends TransportResponse> remove = handlers.remove(entry.getKey());
+                    if (remove != null) {
+                        holders.add(holder);
+                    }
+                }
+            }
+            return holders;
+        }
+
+        /**
+         * Called by the {@link ProtobufTransport} implementation when a response or an exception has been received for a previously
+         * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not
+         * found.
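+         * A typical register/claim sequence (a sketch; names follow this class):
+         * <pre>
+         * long requestId = responseHandlers.add(context);   // register before sending
+         * // ... later, when a response for requestId arrives ...
+         * TransportResponseHandler handler = responseHandlers.onResponseReceived(requestId, listener);
+         * if (handler != null) {
+         *     // deserialize the payload and dispatch on handler.executor()
+         * }
+         * </pre>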
+         */
+        public TransportResponseHandler<? extends TransportResponse> onResponseReceived(
+            final long requestId,
+            final ProtobufTransportMessageListener listener
+        ) {
+            ResponseContext<? extends TransportResponse> context = handlers.remove(requestId);
+            listener.onResponseReceived(requestId, context);
+            if (context == null) {
+                return null;
+            } else {
+                return context.handler();
+            }
+        }
+    }
+
+    /**
+     * Request handler implementations
+     *
+     * @opensearch.internal
+     */
+    final class RequestHandlers {
+
+        private volatile Map<String, RequestHandlerRegistry<? extends TransportRequest>> requestHandlers = Collections.emptyMap();
+
+        synchronized void registerHandler(RequestHandlerRegistry<? extends TransportRequest> reg) {
+            if (requestHandlers.containsKey(reg.getAction())) {
+                throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered");
+            }
+            requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap();
+        }
+
+        // TODO: Only visible for testing. Perhaps move StubbableTransport from
+        // org.opensearch.test.transport to org.opensearch.transport
+        public synchronized void forceRegister(RequestHandlerRegistry<? extends TransportRequest> reg) {
+            requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap();
+        }
+
+        @SuppressWarnings("unchecked")
+        public <T extends TransportRequest> RequestHandlerRegistry<T> getHandler(String action) {
+            return (RequestHandlerRegistry<T>) requestHandlers.get(action);
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportChannel.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportChannel.java
new file mode 100644
index 0000000000000..f9eac58f4e246
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportChannel.java
@@ -0,0 +1,56 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.Version;
+
+import java.io.IOException;
+
+/**
+ * A transport channel allows sending a response to a request on the channel.
+*
+* @opensearch.internal
+*/
+public interface ProtobufTransportChannel {
+
+    Logger logger = LogManager.getLogger(ProtobufTransportChannel.class);
+
+    String getProfileName();
+
+    String getChannelType();
+
+    void sendResponse(ProtobufTransportResponse response) throws IOException;
+
+    void sendResponse(Exception exception) throws IOException;
+
+    /**
+     * Returns the version of the other party that this channel will send a response to.
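+     * The response must be serialized in a form this version can read, e.g. (a sketch using
+     * the ProtobufStreamOutput helper from this change):
+     * <pre>
+     * protobufStreamOutput.setVersion(channel.getVersion());
+     * </pre>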
+ */ + default Version getVersion() { + return Version.CURRENT; + } + + /** + * A helper method to send an exception and handle and log a subsequent exception + */ + static void sendErrorResponse(ProtobufTransportChannel channel, String actionName, ProtobufTransportRequest request, Exception e) { + try { + channel.sendResponse(e); + } catch (Exception sendException) { + sendException.addSuppressed(e); + logger.warn( + () -> new ParameterizedMessage("Failed to send error response for action [{}] and request [{}]", actionName, request), + sendException + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java new file mode 100644 index 0000000000000..633955d561fad --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java @@ -0,0 +1,138 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.Nullable; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.common.Booleans.parseBoolean; + +/** + * Transport information +* +* @opensearch.internal +*/ +public class ProtobufTransportInfo implements ProtobufReportingService.ProtobufInfo { + + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportInfo.class); + + /** Whether to add hostname to publish host field when serializing. 
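+     * Controlled by the opensearch.transport.cname_in_publish_address system property read just below.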
*/ + private static final boolean CNAME_IN_PUBLISH_ADDRESS = parseBoolean( + System.getProperty("opensearch.transport.cname_in_publish_address"), + false + ); + + private final ProtobufBoundTransportAddress address; + private Map profileAddresses; + private final boolean cnameInPublishAddress; + + public ProtobufTransportInfo( + ProtobufBoundTransportAddress address, + @Nullable Map profileAddresses + ) { + this(address, profileAddresses, CNAME_IN_PUBLISH_ADDRESS); + } + + public ProtobufTransportInfo( + ProtobufBoundTransportAddress address, + @Nullable Map profileAddresses, + boolean cnameInPublishAddress + ) { + this.address = address; + this.profileAddresses = profileAddresses; + this.cnameInPublishAddress = cnameInPublishAddress; + } + + public ProtobufTransportInfo(CodedInputStream in) throws IOException { + address = new ProtobufBoundTransportAddress(in); + int size = in.readInt32(); + if (size > 0) { + profileAddresses = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String key = in.readString(); + ProtobufBoundTransportAddress value = new ProtobufBoundTransportAddress(in); + profileAddresses.put(key, value); + } + } + this.cnameInPublishAddress = CNAME_IN_PUBLISH_ADDRESS; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + address.writeTo(out); + if (profileAddresses != null) { + out.writeInt32NoTag(profileAddresses.size()); + } else { + out.writeInt32NoTag(0); + } + if (profileAddresses != null && profileAddresses.size() > 0) { + for (Map.Entry entry : profileAddresses.entrySet()) { + out.writeStringNoTag(entry.getKey()); + entry.getValue().writeTo(out); + } + } + } + + static final class Fields { + static final String TRANSPORT = "transport"; + static final String BOUND_ADDRESS = "bound_address"; + static final String PUBLISH_ADDRESS = "publish_address"; + static final String PROFILES = "profiles"; + } + + private String formatPublishAddressString(String propertyName, ProtobufTransportAddress publishAddress) { + String publishAddressString = publishAddress.toString(); + String hostString = publishAddress.address().getHostString(); + if (InetAddresses.isInetAddress(hostString) == false) { + if (cnameInPublishAddress) { + publishAddressString = hostString + '/' + publishAddress.toString(); + } else { + deprecationLogger.deprecate( + "cname_in_publish_address_" + propertyName, + propertyName + + " was printed as [ip:port] instead of [hostname/ip:port]. " + + "This format is deprecated and will change to [hostname/ip:port] in a future version. " + + "Use -Dopensearch.transport.cname_in_publish_address=true to enforce non-deprecated formatting." + ); + } + } + return publishAddressString; + } + + public ProtobufBoundTransportAddress address() { + return address; + } + + public ProtobufBoundTransportAddress getAddress() { + return address(); + } + + public Map getProfileAddresses() { + return profileAddresses(); + } + + public Map profileAddresses() { + return profileAddresses; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java new file mode 100644 index 0000000000000..0d54c80512c03 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java @@ -0,0 +1,69 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.transport;
+
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+
+/**
+ * Listens for transport messages
+*
+* @opensearch.internal
+*/
+public interface ProtobufTransportMessageListener {
+
+    ProtobufTransportMessageListener NOOP_LISTENER = new ProtobufTransportMessageListener() {
+    };
+
+    /**
+     * Called once a request is received
+     * @param requestId the internal request ID
+     * @param action the request action
+     */
+    default void onRequestReceived(long requestId, String action) {}
+
+    /**
+     * Called for every action response sent after the response has been passed to the underlying network implementation.
+     * @param requestId the request ID (unique per client)
+     * @param action the request action
+     * @param response the response sent
+     */
+    default void onResponseSent(long requestId, String action, ProtobufTransportResponse response) {}
+
+    /**
+     * Called for every failed action response after the response has been passed to the underlying network implementation.
+     * @param requestId the request ID (unique per client)
+     * @param action the request action
+     * @param error the error sent back to the caller
+     */
+    default void onResponseSent(long requestId, String action, Exception error) {}
+
+    /**
+     * Called for every request sent to a server after the request has been passed to the underlying network implementation
+     * @param node the node the request was sent to
+     * @param requestId the internal request id
+     * @param action the action name
+     * @param request the actual request
+     * @param finalOptions the request options
+     */
+    default void onRequestSent(
+        ProtobufDiscoveryNode node,
+        long requestId,
+        String action,
+        ProtobufTransportRequest request,
+        TransportRequestOptions finalOptions
+    ) {}
+
+    /**
+     * Called for every response received
+     * @param requestId the request id for this response
+     * @param context the response context or null if the context was already processed, e.g. due to a timeout.
+     */
+    default void onResponseReceived(long requestId, ProtobufTransport.ResponseContext context) {}
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java
new file mode 100644
index 0000000000000..6eb9b7fd72f4e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java
@@ -0,0 +1,22 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import org.opensearch.tasks.ProtobufTask;
+
+/**
+ * Handles transport requests
+*
+* @opensearch.internal
+*/
+public interface ProtobufTransportRequestHandler<T extends ProtobufTransportRequest> {
+
+    void messageReceived(T request, TransportChannel channel, ProtobufTask task) throws Exception;
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportResponse.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportResponse.java
new file mode 100644
index 0000000000000..9a5a0d90328b3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportResponse.java
@@ -0,0 +1,59 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license. 
+*/
+
+/*
+* Modifications Copyright OpenSearch Contributors. See
+* GitHub history for details.
+*/
+
+package org.opensearch.transport;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.IOException;
+
+/**
+ * Response over the transport interface
+*
+* @opensearch.internal
+*/
+public abstract class ProtobufTransportResponse extends ProtobufTransportMessage {
+
+    /**
+     * Constructs a new empty transport response
+     */
+    public ProtobufTransportResponse() {}
+
+    /**
+     * Constructs a new transport response with the data from the {@link CodedInputStream}. This is
+     * currently a no-op. However, this exists to allow extenders to call super(in)
+     * so that reading can mirror writing where we often call super.writeTo(out).
+     */
+    public ProtobufTransportResponse(CodedInputStream in) throws IOException {
+        super(in);
+    }
+
+    /**
+     * Empty transport response
+     *
+     * @opensearch.internal
+     */
+    public static class Empty extends ProtobufTransportResponse {
+        public static final Empty INSTANCE = new Empty();
+
+        @Override
+        public String toString() {
+            return "Empty{}";
+        }
+
+        @Override
+        public void writeTo(CodedOutputStream out) throws IOException {}
+    }
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java
new file mode 100644
index 0000000000000..369419be1c8f6
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java
@@ -0,0 +1,1557 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license. 
+*/ + +package org.opensearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionListenerResponseHandler; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; +import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.regex.Regex; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.node.NodeClosedException; +import org.opensearch.node.ProtobufReportingService; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskManager; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; + +/** + * The main OpenSearch transport service +* +* @opensearch.internal +*/ +public class ProtobufTransportService extends AbstractLifecycleComponent + implements + ProtobufReportingService, + TransportMessageListener, + TransportConnectionListener { + private static final Logger logger = LogManager.getLogger(ProtobufTransportService.class); + + public static final String DIRECT_RESPONSE_PROFILE = ".direct"; + public static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; + + private final AtomicBoolean handleIncomingRequests = new AtomicBoolean(); + private final DelegatingTransportMessageListener messageListener = new DelegatingTransportMessageListener(); + protected final Transport transport; + protected final ConnectionManager connectionManager; + protected final ThreadPool threadPool; + protected final ClusterName clusterName; + protected final TaskManager taskManager; + private final TransportInterceptor.AsyncSender asyncSender; + private final Function localNodeFactory; + private final boolean remoteClusterClient; + private final Transport.ResponseHandlers responseHandlers; + private final TransportInterceptor interceptor; + + // An LRU (don't really care about concurrency here) that holds the 
latest timed out requests so if they + // do show up, we can print more descriptive information about them + final Map timeoutInfoHandlers = Collections.synchronizedMap( + new LinkedHashMap(100, .75F, true) { + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + return size() > 100; + } + } + ); + + public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() { + }; + + // tracer log + + private final Logger tracerLog; + + volatile String[] tracerLogInclude; + volatile String[] tracerLogExclude; + + private final RemoteClusterService remoteClusterService; + + /** if set will call requests sent to this id to shortcut and executed locally */ + volatile DiscoveryNode localNode = null; + private final Transport.Connection localNodeConnection = new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return localNode; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws TransportException { + sendLocalRequest(requestId, action, request, options); + } + + @Override + public void addCloseListener(ActionListener listener) {} + + @Override + public boolean isClosed() { + return false; + } + + @Override + public void close() {} + }; + + /** + * Build the service. + * + * @param clusterSettings if non null, the {@linkplain ProtobufTransportService} will register with the {@link ClusterSettings} for settings + * * updates for {@link TransportSettings#TRACE_LOG_EXCLUDE_SETTING} and {@link TransportSettings#TRACE_LOG_INCLUDE_SETTING}. + */ + public ProtobufTransportService( + Settings settings, + Transport transport, + ThreadPool threadPool, + TransportInterceptor transportInterceptor, + Function localNodeFactory, + @Nullable ClusterSettings clusterSettings, + Set taskHeaders + ) { + this( + settings, + transport, + threadPool, + transportInterceptor, + localNodeFactory, + clusterSettings, + taskHeaders, + new ClusterConnectionManager(settings, transport) + ); + } + + public ProtobufTransportService( + Settings settings, + Transport transport, + ThreadPool threadPool, + TransportInterceptor transportInterceptor, + Function localNodeFactory, + @Nullable ClusterSettings clusterSettings, + Set taskHeaders, + ConnectionManager connectionManager + ) { + this.transport = transport; + transport.setSlowLogThreshold(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings)); + this.threadPool = threadPool; + this.localNodeFactory = localNodeFactory; + this.connectionManager = connectionManager; + this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); + setTracerLogInclude(TransportSettings.TRACE_LOG_INCLUDE_SETTING.get(settings)); + setTracerLogExclude(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.get(settings)); + tracerLog = Loggers.getLogger(logger, ".tracer"); + taskManager = createTaskManager(settings, clusterSettings, threadPool, taskHeaders); + this.interceptor = transportInterceptor; + this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); + this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); + remoteClusterService = new RemoteClusterService(settings, this); + responseHandlers = transport.getResponseHandlers(); + if (clusterSettings != null) { + clusterSettings.addSettingsUpdateConsumer(TransportSettings.TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); + clusterSettings.addSettingsUpdateConsumer(TransportSettings.TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); + if 
(remoteClusterClient) { + remoteClusterService.listenForUpdates(clusterSettings); + } + clusterSettings.addSettingsUpdateConsumer(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING, transport::setSlowLogThreshold); + } + registerRequestHandler( + HANDSHAKE_ACTION_NAME, + ThreadPool.Names.SAME, + false, + false, + HandshakeRequest::new, + (request, channel, task) -> channel.sendResponse(new HandshakeResponse(localNode, clusterName, localNode.getVersion())) + ); + } + + public RemoteClusterService getRemoteClusterService() { + return remoteClusterService; + } + + public DiscoveryNode getLocalNode() { + return localNode; + } + + public TaskManager getTaskManager() { + return taskManager; + } + + protected TaskManager createTaskManager( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { + if (clusterSettings != null) { + return TaskManager.createTaskManagerWithClusterSettings(settings, clusterSettings, threadPool, taskHeaders); + } else { + return new TaskManager(settings, threadPool, taskHeaders); + } + } + + /** + * The executor service for this transport service. + * + * @return the executor service + */ + private ExecutorService getExecutorService() { + return threadPool.generic(); + } + + void setTracerLogInclude(List tracerLogInclude) { + this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); + } + + void setTracerLogExclude(List tracerLogExclude) { + this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY); + } + + @Override + protected void doStart() { + transport.setMessageListener(this); + connectionManager.addListener(this); + transport.start(); + if (transport.boundAddress() != null && logger.isInfoEnabled()) { + logger.info("{}", transport.boundAddress()); + for (Map.Entry entry : transport.profileBoundAddresses().entrySet()) { + logger.info("profile [{}]: {}", entry.getKey(), entry.getValue()); + } + } + localNode = localNodeFactory.apply(transport.boundAddress()); + + if (remoteClusterClient) { + // here we start to connect to the remote clusters + remoteClusterService.initializeRemoteClusters(); + } + } + + @Override + protected void doStop() { + try { + IOUtils.close(connectionManager, remoteClusterService, transport::stop); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + // in case the transport is not connected to our local node (thus cleaned on node disconnect) + // make sure to clean any leftover on going handles + for (final Transport.ResponseContext holderToNotify : responseHandlers.prune(h -> true)) { + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + getExecutorService().execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug( + () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action() + ), + e + ); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action() + ), + e + ); + } + + @Override + public void doRun() { + TransportException ex = new SendRequestTransportException( + holderToNotify.connection().getNode(), + holderToNotify.action(), + new NodeClosedException(localNode) + ); + holderToNotify.handler().handleException(ex); + } + }); + } + } + } + + 
@Override + protected void doClose() throws IOException { + transport.close(); + } + + /** + * start accepting incoming requests. + * when the transport layer starts up it will block any incoming requests until + * this method is called + */ + public final void acceptIncomingRequests() { + handleIncomingRequests.set(true); + } + + @Override + public ProtobufTransportInfo info() { + ProtobufBoundTransportAddress boundTransportAddress = boundAddress(); + if (boundTransportAddress == null) { + return null; + } + return new ProtobufTransportInfo(boundTransportAddress, transport.profileBoundAddresses()); + } + + public TransportStats stats() { + return transport.getStats(); + } + + public boolean isTransportSecure() { + return transport.isSecure(); + } + + public ProtobufBoundTransportAddress boundAddress() { + return transport.boundAddress(); + } + + public List getDefaultSeedAddresses() { + return transport.getDefaultSeedAddresses(); + } + + /** + * Returns true iff the given node is already connected. + */ + public boolean nodeConnected(DiscoveryNode node) { + return isLocalNode(node) || connectionManager.nodeConnected(node); + } + + /** + * Connect to the specified node with the default connection profile + * + * @param node the node to connect to + */ + public void connectToNode(DiscoveryNode node) throws ConnectTransportException { + connectToNode(node, (ConnectionProfile) null); + } + + // We are skipping node validation for extensibility as extensionNode and opensearchNode(LocalNode) will have different ephemeral id's + public void connectToExtensionNode(final DiscoveryNode node) { + PlainActionFuture.get(fut -> connectToExtensionNode(node, (ConnectionProfile) null, ActionListener.map(fut, x -> null))); + } + + /** + * Connect to the specified node with the given connection profile + * + * @param node the node to connect to + * @param connectionProfile the connection profile to use when connecting to this node + */ + public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + PlainActionFuture.get(fut -> connectToNode(node, connectionProfile, ActionListener.map(fut, x -> null))); + } + + public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + PlainActionFuture.get(fut -> connectToExtensionNode(node, connectionProfile, ActionListener.map(fut, x -> null))); + } + + /** + * Connect to the specified node with the given connection profile. + * The ActionListener will be called on the calling thread or the generic thread pool. + * + * @param node the node to connect to + * @param listener the action listener to notify + */ + public void connectToNode(DiscoveryNode node, ActionListener listener) throws ConnectTransportException { + connectToNode(node, null, listener); + } + + public void connectToExtensionNode(DiscoveryNode node, ActionListener listener) throws ConnectTransportException { + connectToExtensionNode(node, null, listener); + } + + /** + * Connect to the specified node with the given connection profile. + * The ActionListener will be called on the calling thread or the generic thread pool. 
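+     * A typical blocking caller wraps the listener in a future, exactly as the convenience
+     * overloads above do (a sketch):
+     * <pre>
+     * PlainActionFuture.get(fut -> connectToNode(node, connectionProfile, ActionListener.map(fut, x -> null)));
+     * </pre>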
+ * + * @param node the node to connect to + * @param connectionProfile the connection profile to use when connecting to this node + * @param listener the action listener to notify + */ + public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener listener) { + if (isLocalNode(node)) { + listener.onResponse(null); + return; + } + connectionManager.connectToNode(node, connectionProfile, connectionValidator(node), listener); + } + + public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener listener) { + if (isLocalNode(node)) { + listener.onResponse(null); + return; + } + connectionManager.connectToNode(node, connectionProfile, extensionConnectionValidator(node), listener); + } + + public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile, listener) -> { + // We don't validate cluster names to allow for CCS connections. + handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { + final DiscoveryNode remote = resp.discoveryNode; + + if (node.equals(remote) == false) { + throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); + } + + return null; + })); + }; + } + + public ConnectionManager.ConnectionValidator extensionConnectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile, listener) -> { + // We don't validate cluster names to allow for CCS connections. + handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { + final DiscoveryNode remote = resp.discoveryNode; + logger.info("Connection validation was skipped"); + return null; + })); + }; + } + + /** + * Establishes and returns a new connection to the given node. The connection is NOT maintained by this service, it's the callers + * responsibility to close the connection once it goes out of scope. + * The ActionListener will be called on the calling thread or the generic thread pool. + * @param node the node to connect to + * @param connectionProfile the connection profile to use + */ + public Transport.Connection openConnection(final DiscoveryNode node, ConnectionProfile connectionProfile) { + return PlainActionFuture.get(fut -> openConnection(node, connectionProfile, fut)); + } + + /** + * Establishes a new connection to the given node. The connection is NOT maintained by this service, it's the callers + * responsibility to close the connection once it goes out of scope. + * The ActionListener will be called on the calling thread or the generic thread pool. + * @param node the node to connect to + * @param connectionProfile the connection profile to use + * @param listener the action listener to notify + */ + public void openConnection( + final DiscoveryNode node, + ConnectionProfile connectionProfile, + ActionListener listener + ) { + if (isLocalNode(node)) { + listener.onResponse(localNodeConnection); + } else { + connectionManager.openConnection(node, connectionProfile, listener); + } + } + + /** + * Executes a high-level handshake using the given connection + * and returns the discovery node of the node the connection + * was established with. The handshake will fail if the cluster + * name on the target node mismatches the local cluster name. + * The ActionListener will be called on the calling thread or the generic thread pool. 
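+     * For example (a sketch; ActionListener.wrap pairs the success and failure callbacks):
+     * <pre>
+     * handshake(connection, profile.getHandshakeTimeout().millis(),
+     *     ActionListener.wrap(node -> logger.info("connected to {}", node), e -> logger.warn("handshake failed", e)));
+     * </pre>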
+     *
+     * @param connection the connection to a specific node
+     * @param handshakeTimeout handshake timeout
+     * @param listener action listener to notify
+     * @throws ConnectTransportException if the connection failed
+     * @throws IllegalStateException if the handshake failed
+     */
+    public void handshake(
+        final Transport.Connection connection,
+        final long handshakeTimeout,
+        final ActionListener<DiscoveryNode> listener
+    ) {
+        handshake(
+            connection,
+            handshakeTimeout,
+            clusterName.getEqualityPredicate(),
+            ActionListener.map(listener, HandshakeResponse::getDiscoveryNode)
+        );
+    }
+
+    /**
+     * Executes a high-level handshake using the given connection
+     * and returns the discovery node of the node the connection
+     * was established with. The handshake will fail if the cluster
+     * name on the target node doesn't match the local cluster name.
+     * The ActionListener will be called on the calling thread or the generic thread pool.
+     *
+     * @param connection the connection to a specific node
+     * @param handshakeTimeout handshake timeout
+     * @param clusterNamePredicate cluster name validation predicate
+     * @param listener action listener to notify
+     * @throws IllegalStateException if the handshake failed
+     */
+    public void handshake(
+        final Transport.Connection connection,
+        final long handshakeTimeout,
+        Predicate<ClusterName> clusterNamePredicate,
+        final ActionListener<HandshakeResponse> listener
+    ) {
+        final DiscoveryNode node = connection.getNode();
+        sendRequest(
+            connection,
+            HANDSHAKE_ACTION_NAME,
+            HandshakeRequest.INSTANCE,
+            TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(),
+            new ActionListenerResponseHandler<>(new ActionListener<HandshakeResponse>() {
+                @Override
+                public void onResponse(HandshakeResponse response) {
+                    if (clusterNamePredicate.test(response.clusterName) == false) {
+                        listener.onFailure(
+                            new IllegalStateException(
+                                "handshake with ["
+                                    + node
+                                    + "] failed: remote cluster name ["
+                                    + response.clusterName.value()
+                                    + "] does not match "
+                                    + clusterNamePredicate
+                            )
+                        );
+                    } else if (response.version.isCompatible(localNode.getVersion()) == false) {
+                        listener.onFailure(
+                            new IllegalStateException(
+                                "handshake with ["
+                                    + node
+                                    + "] failed: remote node version ["
+                                    + response.version
+                                    + "] is incompatible with local node version ["
+                                    + localNode.getVersion()
+                                    + "]"
+                            )
+                        );
+                    } else {
+                        listener.onResponse(response);
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    listener.onFailure(e);
+                }
+            }, HandshakeResponse::new, ThreadPool.Names.GENERIC)
+        );
+    }
+
+    public ConnectionManager getConnectionManager() {
+        return connectionManager;
+    }
+
+    /**
+     * Internal Handshake request
+     *
+     * @opensearch.internal
+     */
+    static class HandshakeRequest extends TransportRequest {
+
+        public static final HandshakeRequest INSTANCE = new HandshakeRequest();
+
+        HandshakeRequest(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        private HandshakeRequest() {}
+
+    }
+
+    /**
+     * Internal handshake response
+     *
+     * @opensearch.internal
+     */
+    public static class HandshakeResponse extends TransportResponse {
+        private final DiscoveryNode discoveryNode;
+        private final ClusterName clusterName;
+        private final Version version;
+
+        public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) {
+            this.discoveryNode = discoveryNode;
+            this.version = version;
+            this.clusterName = clusterName;
+        }
+
+        public HandshakeResponse(StreamInput in) throws IOException {
+            super(in);
+            discoveryNode = in.readOptionalWriteable(DiscoveryNode::new);
+            clusterName = new ClusterName(in); 
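+            // read order mirrors the write order in writeTo below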
+ version = Version.readVersion(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(discoveryNode); + clusterName.writeTo(out); + Version.writeVersion(version, out); + } + + public DiscoveryNode getDiscoveryNode() { + return discoveryNode; + } + + public ClusterName getClusterName() { + return clusterName; + } + } + + public void disconnectFromNode(DiscoveryNode node) { + if (isLocalNode(node)) { + return; + } + connectionManager.disconnectFromNode(node); + } + + public void addMessageListener(TransportMessageListener listener) { + messageListener.listeners.add(listener); + } + + public boolean removeMessageListener(TransportMessageListener listener) { + return messageListener.listeners.remove(listener); + } + + public void addConnectionListener(TransportConnectionListener listener) { + connectionManager.addListener(listener); + } + + public void removeConnectionListener(TransportConnectionListener listener) { + connectionManager.removeListener(listener); + } + + public TransportFuture submitRequest( + DiscoveryNode node, + String action, + TransportRequest request, + TransportResponseHandler handler + ) throws TransportException { + return submitRequest(node, action, request, TransportRequestOptions.EMPTY, handler); + } + + public TransportFuture submitRequest( + DiscoveryNode node, + String action, + TransportRequest request, + TransportRequestOptions options, + TransportResponseHandler handler + ) throws TransportException { + PlainTransportFuture futureHandler = new PlainTransportFuture<>(handler); + try { + Transport.Connection connection = getConnection(node); + sendRequest(connection, action, request, options, futureHandler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + futureHandler.handleException(ex); + } + return futureHandler; + } + + public void sendRequest( + final DiscoveryNode node, + final String action, + final TransportRequest request, + final TransportResponseHandler handler + ) { + final Transport.Connection connection; + try { + connection = getConnection(node); + } catch (final NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + return; + } + sendRequest(connection, action, request, TransportRequestOptions.EMPTY, handler); + } + + public final void sendRequest( + final DiscoveryNode node, + final String action, + final TransportRequest request, + final TransportRequestOptions options, + TransportResponseHandler handler + ) { + final Transport.Connection connection; + try { + connection = getConnection(node); + } catch (final NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + return; + } + sendRequest(connection, action, request, options, handler); + } + + /** + * Sends a request on the specified connection. If there is a failure sending the request, the specified handler is invoked. 
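+     *
+     * A minimal usage sketch (illustrative only; {@code ExampleRequest}, {@code ExampleResponse},
+     * the action name and the {@code listener} variable are hypothetical, not part of this patch):
+     * 
+     * service.sendRequest(connection, "internal:example/ping", new ExampleRequest(),
+     *     TransportRequestOptions.EMPTY,
+     *     new ActionListenerResponseHandler<>(listener, ExampleResponse::new));
+     * 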
+     *
+     * @param connection the connection to send the request on
+     * @param action the name of the action
+     * @param request the request
+     * @param options the options for this request
+     * @param handler the response handler
+     * @param <T> the type of the transport response
+     */
+    public final <T extends TransportResponse> void sendRequest(
+        final Transport.Connection connection,
+        final String action,
+        final TransportRequest request,
+        final TransportRequestOptions options,
+        final TransportResponseHandler<T> handler
+    ) {
+        try {
+            logger.debug("Action: " + action);
+            final TransportResponseHandler<T> delegate;
+            if (request.getParentTask().isSet()) {
+                // TODO: capture the connection instead so that we can cancel child tasks on the remote connections.
+                final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode());
+                delegate = new TransportResponseHandler<T>() {
+                    @Override
+                    public void handleResponse(T response) {
+                        unregisterChildNode.close();
+                        handler.handleResponse(response);
+                    }
+
+                    @Override
+                    public void handleException(TransportException exp) {
+                        unregisterChildNode.close();
+                        handler.handleException(exp);
+                    }
+
+                    @Override
+                    public String executor() {
+                        return handler.executor();
+                    }
+
+                    @Override
+                    public T read(StreamInput in) throws IOException {
+                        return handler.read(in);
+                    }
+
+                    @Override
+                    public String toString() {
+                        return getClass().getName() + "/[" + action + "]:" + handler.toString();
+                    }
+                };
+            } else {
+                delegate = handler;
+            }
+            asyncSender.sendRequest(connection, action, request, options, delegate);
+        } catch (final Exception ex) {
+            // the caller might not handle this so we invoke the handler
+            final TransportException te;
+            if (ex instanceof TransportException) {
+                te = (TransportException) ex;
+            } else {
+                te = new TransportException("failure to send", ex);
+            }
+            handler.handleException(te);
+        }
+    }
+
+    /**
+     * Returns either a real transport connection or a local node connection if we are using the local node optimization.
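+     * The local-node connection short-circuits the wire protocol: requests sent on it are
+     * dispatched in-process via {@code sendLocalRequest} rather than serialized.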
+     * @throws NodeNotConnectedException if the given node is not connected
+     */
+    public Transport.Connection getConnection(DiscoveryNode node) {
+        if (isLocalNode(node)) {
+            return localNodeConnection;
+        } else {
+            return connectionManager.getConnection(node);
+        }
+    }
+
+    public final <T extends TransportResponse> void sendChildRequest(
+        final DiscoveryNode node,
+        final String action,
+        final TransportRequest request,
+        final Task parentTask,
+        final TransportRequestOptions options,
+        final TransportResponseHandler<T> handler
+    ) {
+        final Transport.Connection connection;
+        try {
+            connection = getConnection(node);
+        } catch (final NodeNotConnectedException ex) {
+            // the caller might not handle this so we invoke the handler
+            handler.handleException(ex);
+            return;
+        }
+        sendChildRequest(connection, action, request, parentTask, options, handler);
+    }
+
+    public <T extends TransportResponse> void sendChildRequest(
+        final Transport.Connection connection,
+        final String action,
+        final TransportRequest request,
+        final Task parentTask,
+        final TransportResponseHandler<T> handler
+    ) {
+        sendChildRequest(connection, action, request, parentTask, TransportRequestOptions.EMPTY, handler);
+    }
+
+    public <T extends TransportResponse> void sendChildRequest(
+        final Transport.Connection connection,
+        final String action,
+        final TransportRequest request,
+        final Task parentTask,
+        final TransportRequestOptions options,
+        final TransportResponseHandler<T> handler
+    ) {
+        request.setParentTask(localNode.getId(), parentTask.getId());
+        sendRequest(connection, action, request, options, handler);
+    }
+
+    private <T extends TransportResponse> void sendRequestInternal(
+        final Transport.Connection connection,
+        final String action,
+        final TransportRequest request,
+        final TransportRequestOptions options,
+        TransportResponseHandler<T> handler
+    ) {
+        if (connection == null) {
+            throw new IllegalStateException("can't send request to a null connection");
+        }
+        DiscoveryNode node = connection.getNode();
+
+        Supplier<ThreadContext.StoredContext> storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true);
+        ContextRestoreResponseHandler<T> responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler);
+        // TODO we can probably fold this entire request ID dance into connection.sendRequest but it will be a bigger refactoring
+        final long requestId = responseHandlers.add(new Transport.ResponseContext<>(responseHandler, connection, action));
+        final TimeoutHandler timeoutHandler;
+        if (options.timeout() != null) {
+            timeoutHandler = new TimeoutHandler(requestId, connection.getNode(), action);
+            responseHandler.setTimeoutHandler(timeoutHandler);
+        } else {
+            timeoutHandler = null;
+        }
+        try {
+            if (lifecycle.stoppedOrClosed()) {
+                /*
+                 * If we are not started the exception handling will remove the request holder again and call the handler to notify the
+                 * caller. It will only notify if the toStop code hasn't done the work yet.
+                 */
+                throw new NodeClosedException(localNode);
+            }
+            if (timeoutHandler != null) {
+                assert options.timeout() != null;
+                timeoutHandler.scheduleTimeout(options.timeout());
+            }
+            connection.sendRequest(requestId, action, request, options); // local node optimization happens upstream
+        } catch (final Exception e) {
+            // usually happens either because we failed to connect to the node
+            // or because we failed serializing the message
+            final Transport.ResponseContext<? extends TransportResponse> contextToNotify = responseHandlers.remove(requestId);
+            // If contextToNotify == null then the handler has already been taken care of.
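+            // a concurrent timeout or connection close may already have removed and notified the
+            // handler, in which case there is nothing left to do here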
+ if (contextToNotify != null) { + if (timeoutHandler != null) { + timeoutHandler.cancel(); + } + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows. In the special case of running into a closing node we run on the current + // thread on a best effort basis though. + final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e); + final String executor = lifecycle.stoppedOrClosed() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug( + () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + contextToNotify.action() + ), + e + ); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + contextToNotify.action() + ), + e + ); + } + + @Override + protected void doRun() throws Exception { + contextToNotify.handler().handleException(sendRequestException); + } + }); + } else { + logger.debug("Exception while sending request, handler likely already notified due to timeout", e); + } + } + } + + private void sendLocalRequest(long requestId, final String action, final TransportRequest request, TransportRequestOptions options) { + final DirectResponseChannel channel = new DirectResponseChannel(localNode, action, requestId, this, threadPool); + try { + onRequestSent(localNode, requestId, action, request, options); + onRequestReceived(requestId, action); + final RequestHandlerRegistry reg = getRequestHandler(action); + if (reg == null) { + throw new ActionNotFoundTransportException("Action [" + action + "] not found"); + } + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + // noinspection unchecked + reg.processMessageReceived(request, channel); + } else { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + // noinspection unchecked + reg.processMessageReceived(request, channel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn( + () -> new ParameterizedMessage("failed to notify channel of error message for action [{}]", action), + inner + ); + } + } + + @Override + public String toString() { + return "processing of [" + requestId + "][" + action + "]: " + request; + } + }); + } + + } catch (Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage("failed to notify channel of error message for action [{}]", action), inner); + } + } + } + + private boolean shouldTraceAction(String action) { + return shouldTraceAction(action, tracerLogInclude, tracerLogExclude); + } + + public static boolean shouldTraceAction(String action, String[] include, String[] exclude) { + if (include.length > 0) { + if (Regex.simpleMatch(include, action) == false) { + return false; + } + } + if (exclude.length > 0) { + return !Regex.simpleMatch(exclude, action); + } + return true; + } + + public TransportAddress[] addressesFromString(String 
address) throws UnknownHostException {
+        return transport.addressesFromString(address);
+    }
+
+    /**
+     * A set of all valid action prefixes.
+     */
+    public static final Set<String> VALID_ACTION_PREFIXES = Collections.unmodifiableSet(
+        new HashSet<>(
+            Arrays.asList(
+                "indices:admin",
+                "indices:monitor",
+                "indices:data/write",
+                "indices:data/read",
+                "indices:internal",
+                "cluster:admin",
+                "cluster:monitor",
+                "cluster:internal",
+                "internal:"
+            )
+        )
+    );
+
+    private void validateActionName(String actionName) {
+        // TODO we should make this a hard validation and throw an exception but we need a good way to add a backwards layer
+        // for it. Maybe start with a deprecation layer
+        if (isValidActionName(actionName) == false) {
+            logger.warn("invalid action name [" + actionName + "] must start with one of: " + ProtobufTransportService.VALID_ACTION_PREFIXES);
+        }
+    }
+
+    /**
+     * Returns true iff the action name starts with a valid prefix.
+     *
+     * @see #VALID_ACTION_PREFIXES
+     */
+    public static boolean isValidActionName(String actionName) {
+        for (String prefix : VALID_ACTION_PREFIXES) {
+            if (actionName.startsWith(prefix)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Registers a new request handler
+     *
+     * @param action The action the request handler is associated with
+     * @param requestReader a callable to be used to construct new instances for streaming
+     * @param executor The executor the request handling will be executed on
+     * @param handler The handler itself that implements the request handling
+     */
+    public <Request extends TransportRequest> void registerRequestHandler(
+        String action,
+        String executor,
+        Writeable.Reader<Request> requestReader,
+        TransportRequestHandler<Request> handler
+    ) {
+        validateActionName(action);
+        handler = interceptor.interceptHandler(action, executor, false, handler);
+        RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
+            action,
+            requestReader,
+            taskManager,
+            handler,
+            executor,
+            false,
+            true
+        );
+        transport.registerRequestHandler(reg);
+    }
+
+    /**
+     * Registers a new request handler
+     *
+     * @param action The action the request handler is associated with
+     * @param requestReader The request class that will be used to construct new instances for streaming
+     * @param executor The executor the request handling will be executed on
+     * @param forceExecution Force execution on the executor queue and never reject it
+     * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached.
+ * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler( + String action, + String executor, + boolean forceExecution, + boolean canTripCircuitBreaker, + Writeable.Reader requestReader, + TransportRequestHandler handler + ) { + validateActionName(action); + handler = interceptor.interceptHandler(action, executor, forceExecution, handler); + RequestHandlerRegistry reg = new RequestHandlerRegistry<>( + action, + requestReader, + taskManager, + handler, + executor, + forceExecution, + canTripCircuitBreaker + ); + transport.registerRequestHandler(reg); + } + + /** + * called by the {@link Transport} implementation when an incoming request arrives but before + * any parsing of it has happened (with the exception of the requestId and action) + */ + @Override + public void onRequestReceived(long requestId, String action) { + if (handleIncomingRequests.get() == false) { + throw new IllegalStateException("transport not ready yet to handle incoming requests"); + } + if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { + tracerLog.trace("[{}][{}] received request", requestId, action); + } + messageListener.onRequestReceived(requestId, action); + } + + /** called by the {@link Transport} implementation once a request has been sent */ + @Override + public void onRequestSent( + DiscoveryNode node, + long requestId, + String action, + TransportRequest request, + TransportRequestOptions options + ) { + if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { + tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); + } + messageListener.onRequestSent(node, requestId, action, request, options); + } + + @Override + public void onResponseReceived(long requestId, Transport.ResponseContext holder) { + if (holder == null) { + checkForTimeout(requestId); + } else if (tracerLog.isTraceEnabled() && shouldTraceAction(holder.action())) { + tracerLog.trace("[{}][{}] received response from [{}]", requestId, holder.action(), holder.connection().getNode()); + } + messageListener.onResponseReceived(requestId, holder); + } + + /** called by the {@link Transport} implementation once a response was sent to calling node */ + @Override + public void onResponseSent(long requestId, String action, TransportResponse response) { + if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { + tracerLog.trace("[{}][{}] sent response", requestId, action); + } + messageListener.onResponseSent(requestId, action, response); + } + + /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */ + @Override + public void onResponseSent(long requestId, String action, Exception e) { + if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { + tracerLog.trace(() -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); + } + messageListener.onResponseSent(requestId, action, e); + } + + public RequestHandlerRegistry getRequestHandler(String action) { + return transport.getRequestHandlers().getHandler(action); + } + + private void checkForTimeout(long requestId) { + // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished + final DiscoveryNode sourceNode; + final String action; + assert responseHandlers.contains(requestId) == false; + TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); + if (timeoutInfoHolder != null) { + long time = 
threadPool.relativeTimeInMillis();
+            logger.warn(
+                "Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, "
+                    + "action [{}], node [{}], id [{}]",
+                time - timeoutInfoHolder.sentTime(),
+                time - timeoutInfoHolder.timeoutTime(),
+                timeoutInfoHolder.action(),
+                timeoutInfoHolder.node(),
+                requestId
+            );
+            action = timeoutInfoHolder.action();
+            sourceNode = timeoutInfoHolder.node();
+        } else {
+            logger.warn("Transport response handler not found for id [{}]", requestId);
+            action = null;
+            sourceNode = null;
+        }
+        // call tracer out of lock
+        if (tracerLog.isTraceEnabled() == false) {
+            return;
+        }
+        if (action == null) {
+            assert sourceNode == null;
+            tracerLog.trace("[{}] received response but can't resolve it to a request", requestId);
+        } else if (shouldTraceAction(action)) {
+            tracerLog.trace("[{}][{}] received response from [{}]", requestId, action, sourceNode);
+        }
+    }
+
+    @Override
+    public void onConnectionClosed(Transport.Connection connection) {
+        try {
+            List<Transport.ResponseContext<? extends TransportResponse>> pruned = responseHandlers.prune(
+                h -> h.connection().getCacheKey().equals(connection.getCacheKey())
+            );
+            // callback that an exception happened, but on a different thread since we don't
+            // want handlers to worry about stack overflows
+            getExecutorService().execute(new Runnable() {
+                @Override
+                public void run() {
+                    for (Transport.ResponseContext<? extends TransportResponse> holderToNotify : pruned) {
+                        holderToNotify.handler()
+                            .handleException(new NodeDisconnectedException(connection.getNode(), holderToNotify.action()));
+                    }
+                }
+
+                @Override
+                public String toString() {
+                    return "onConnectionClosed(" + connection.getNode() + ")";
+                }
+            });
+        } catch (OpenSearchRejectedExecutionException ex) {
+            logger.debug("Rejected execution on onConnectionClosed", ex);
+        }
+    }
+
+    final class TimeoutHandler implements Runnable {
+
+        private final long requestId;
+        private final long sentTime = threadPool.relativeTimeInMillis();
+        private final String action;
+        private final DiscoveryNode node;
+        volatile Scheduler.Cancellable cancellable;
+
+        TimeoutHandler(long requestId, DiscoveryNode node, String action) {
+            this.requestId = requestId;
+            this.node = node;
+            this.action = action;
+        }
+
+        @Override
+        public void run() {
+            if (responseHandlers.contains(requestId)) {
+                long timeoutTime = threadPool.relativeTimeInMillis();
+                timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(node, action, sentTime, timeoutTime));
+                // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id
+                final Transport.ResponseContext<? extends TransportResponse> holder = responseHandlers.remove(requestId);
+                if (holder != null) {
+                    assert holder.action().equals(action);
+                    assert holder.connection().getNode().equals(node);
+                    holder.handler()
+                        .handleException(
+                            new ReceiveTimeoutTransportException(
+                                holder.connection().getNode(),
+                                holder.action(),
+                                "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]"
+                            )
+                        );
+                } else {
+                    // response was processed, remove timeout info.
+                    timeoutInfoHandlers.remove(requestId);
+                }
+            }
+        }
+
+        /**
+         * Cancels timeout handling. This is a best effort only to avoid running it. Remove the requestId from {@link #responseHandlers}
+         * to make sure this doesn't run.
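+         * If the timeout task has already fired, the response context was removed in {@code run()} and a
+         * late response will be reported through {@code checkForTimeout} instead.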
+ */ + public void cancel() { + assert responseHandlers.contains(requestId) == false : "cancel must be called after the requestId [" + + requestId + + "] has been removed from clientHandlers"; + if (cancellable != null) { + cancellable.cancel(); + } + } + + @Override + public String toString() { + return "timeout handler for [" + requestId + "][" + action + "]"; + } + + private void scheduleTimeout(TimeValue timeout) { + this.cancellable = threadPool.schedule(this, timeout, ThreadPool.Names.GENERIC); + } + } + + /** + * Holder for timeout information + * + * @opensearch.internal + */ + static class TimeoutInfoHolder { + + private final DiscoveryNode node; + private final String action; + private final long sentTime; + private final long timeoutTime; + + TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { + this.node = node; + this.action = action; + this.sentTime = sentTime; + this.timeoutTime = timeoutTime; + } + + public DiscoveryNode node() { + return node; + } + + public String action() { + return action; + } + + public long sentTime() { + return sentTime; + } + + public long timeoutTime() { + return timeoutTime; + } + } + + /** + * This handler wrapper ensures that the response thread executes with the correct thread context. Before any of the handle methods + * are invoked we restore the context. + */ + public static final class ContextRestoreResponseHandler implements TransportResponseHandler { + + private final TransportResponseHandler delegate; + private final Supplier contextSupplier; + private volatile TimeoutHandler handler; + + public ContextRestoreResponseHandler(Supplier contextSupplier, TransportResponseHandler delegate) { + this.delegate = delegate; + this.contextSupplier = contextSupplier; + } + + @Override + public T read(StreamInput in) throws IOException { + return delegate.read(in); + } + + @Override + public void handleResponse(T response) { + if (handler != null) { + handler.cancel(); + } + try (ThreadContext.StoredContext ignore = contextSupplier.get()) { + delegate.handleResponse(response); + } + } + + @Override + public void handleException(TransportException exp) { + if (handler != null) { + handler.cancel(); + } + try (ThreadContext.StoredContext ignore = contextSupplier.get()) { + delegate.handleException(exp); + } + } + + @Override + public String executor() { + return delegate.executor(); + } + + @Override + public String toString() { + return getClass().getName() + "/" + delegate.toString(); + } + + void setTimeoutHandler(TimeoutHandler handler) { + this.handler = handler; + } + + } + + /** + * A channel for a direct response + * + * @opensearch.internal + */ + static class DirectResponseChannel implements TransportChannel { + final DiscoveryNode localNode; + private final String action; + private final long requestId; + final ProtobufTransportService service; + final ThreadPool threadPool; + + DirectResponseChannel(DiscoveryNode localNode, String action, long requestId, ProtobufTransportService service, ThreadPool threadPool) { + this.localNode = localNode; + this.action = action; + this.requestId = requestId; + this.service = service; + this.threadPool = threadPool; + } + + @Override + public String getProfileName() { + return DIRECT_RESPONSE_PROFILE; + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + service.onResponseSent(requestId, action, response); + final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service); + // ignore if 
its null, the service logs it + if (handler != null) { + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processResponse(handler, response); + } else { + threadPool.executor(executor).execute(new Runnable() { + @Override + public void run() { + processResponse(handler, response); + } + + @Override + public String toString() { + return "delivery of response to [" + requestId + "][" + action + "]: " + response; + } + }); + } + } + } + + @SuppressWarnings("unchecked") + protected void processResponse(TransportResponseHandler handler, TransportResponse response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + processException(handler, wrapInRemote(new ResponseHandlerFailureTransportException(e))); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + service.onResponseSent(requestId, action, exception); + final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service); + // ignore if its null, the service logs it + if (handler != null) { + final RemoteTransportException rtx = wrapInRemote(exception); + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processException(handler, rtx); + } else { + threadPool.executor(handler.executor()).execute(new Runnable() { + @Override + public void run() { + processException(handler, rtx); + } + + @Override + public String toString() { + return "delivery of failure response to [" + requestId + "][" + action + "]: " + exception; + } + }); + } + } + } + + protected RemoteTransportException wrapInRemote(Exception e) { + if (e instanceof RemoteTransportException) { + return (RemoteTransportException) e; + } + return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e); + } + + protected void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error( + () -> new ParameterizedMessage("failed to handle exception for action [{}], handler [{}]", action, handler), + e + ); + } + } + + @Override + public String getChannelType() { + return "direct"; + } + + @Override + public Version getVersion() { + return localNode.getVersion(); + } + } + + /** + * Returns the internal thread pool + */ + public ThreadPool getThreadPool() { + return threadPool; + } + + private boolean isLocalNode(DiscoveryNode discoveryNode) { + return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode); + } + + private static final class DelegatingTransportMessageListener implements TransportMessageListener { + + private final List listeners = new CopyOnWriteArrayList<>(); + + @Override + public void onRequestReceived(long requestId, String action) { + for (TransportMessageListener listener : listeners) { + listener.onRequestReceived(requestId, action); + } + } + + @Override + public void onResponseSent(long requestId, String action, TransportResponse response) { + for (TransportMessageListener listener : listeners) { + listener.onResponseSent(requestId, action, response); + } + } + + @Override + public void onResponseSent(long requestId, String action, Exception error) { + for (TransportMessageListener listener : listeners) { + listener.onResponseSent(requestId, action, error); + } + } + + @Override + public void onRequestSent( + DiscoveryNode node, + long requestId, + String action, + TransportRequest request, + 
TransportRequestOptions finalOptions + ) { + for (TransportMessageListener listener : listeners) { + listener.onRequestSent(node, requestId, action, request, finalOptions); + } + } + + @Override + public void onResponseReceived(long requestId, Transport.ResponseContext holder) { + for (TransportMessageListener listener : listeners) { + listener.onResponseReceived(requestId, holder); + } + } + } +} From a1ab5891bdc32d75e8da2a58a2c9ad505ac923ba Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Thu, 25 May 2023 23:23:09 +0000 Subject: [PATCH 08/37] Fixing build and integrating protobuf for classes related to RestNodesAction Signed-off-by: Vacha Shah --- .../org/opensearch/core/rest/RestStatus.java | 10 + .../ProtobufOpenSearchException.java | 912 +++++++++++++++++- .../org/opensearch/action/ActionModule.java | 69 ++ ...ProtobufActionListenerResponseHandler.java | 73 ++ .../action/ProtobufActionRequestBuilder.java | 1 - .../opensearch/action/ProtobufActionType.java | 1 - .../cluster/node/info/ProtobufNodeInfo.java | 47 +- .../node/info/ProtobufNodesInfoAction.java | 26 + .../node/info/ProtobufNodesInfoRequest.java | 8 +- .../info/ProtobufNodesInfoRequestBuilder.java | 65 ++ .../node/info/ProtobufNodesInfoResponse.java | 8 +- .../node/info/ProtobufPluginsAndModules.java | 12 +- .../cluster/node/stats/ProtobufNodeStats.java | 322 +++++++ .../node/stats/ProtobufNodesStatsAction.java | 26 + .../node/stats/ProtobufNodesStatsRequest.java | 12 +- .../ProtobufNodesStatsRequestBuilder.java | 82 ++ .../stats/ProtobufNodesStatsResponse.java | 84 ++ .../state/ProtobufClusterStateAction.java | 1 - .../state/ProtobufClusterStateRequest.java | 16 +- .../ProtobufClusterStateRequestBuilder.java | 2 - .../state/ProtobufClusterStateResponse.java | 11 +- .../indices/stats/ProtobufCommonStats.java | 460 +++++++++ .../stats/ProtobufCommonStatsFlags.java | 29 +- .../action/support/IndicesOptions.java | 12 +- .../action/support/ProtobufActionFilter.java | 8 +- .../support/ProtobufActionFilterChain.java | 6 +- .../ProtobufChannelActionListener.java | 48 + .../support/ProtobufTransportAction.java | 47 +- ...terManagerNodeOperationRequestBuilder.java | 15 +- ...anagerNodeReadOperationRequestBuilder.java | 7 +- .../ProtobufClusterManagerNodeRequest.java | 8 +- .../nodes/ProtobufBaseNodesRequest.java | 20 +- .../nodes/ProtobufBaseNodesResponse.java | 8 +- .../ProtobufNodesOperationRequestBuilder.java | 49 + .../opensearch/client/ClusterAdminClient.java | 4 - .../org/opensearch/client/ProtobufClient.java | 39 +- .../client/ProtobufClusterAdminClient.java | 170 +--- .../client/ProtobufFilterClient.java | 74 ++ .../client/ProtobufOpenSearchClient.java | 4 - .../client/node/ProtobufNodeClient.java | 68 +- .../client/support/AbstractClient.java | 19 - .../support/ProtobufAbstractClient.java | 436 +-------- .../cluster/ProtobufAbstractDiffable.java | 88 ++ .../cluster/ProtobufClusterName.java | 15 + .../cluster/ProtobufClusterState.java | 62 +- .../cluster/ProtobufDiffableUtils.java | 20 +- .../ProtobufNamedDiffableValueSerializer.java | 4 +- .../ProtobufPendingClusterStateStats.java | 87 ++ .../ProtobufPublishClusterStateStats.java | 92 ++ .../cluster/node/ProtobufDiscoveryNode.java | 25 +- .../cluster/node/ProtobufDiscoveryNodes.java | 14 +- .../cluster/routing/RoutingNodes.java | 59 -- .../common/ProtobufFieldMemoryStats.java | 124 +++ .../ProtobufNamedWriteableRegistry.java | 121 +++ ...otobufNotSerializableExceptionWrapper.java | 74 ++ .../common/io/stream/ProtobufStreamInput.java | 232 +++-- 
.../io/stream/ProtobufStreamOutput.java | 265 ++++- .../common/util/concurrent/ThreadContext.java | 12 +- .../discovery/ProtobufDiscoveryStats.java | 99 ++ .../opensearch/http/ProtobufHttpStats.java | 67 ++ .../cache/query/ProtobufQueryCacheStats.java | 151 +++ .../request/ProtobufRequestCacheStats.java | 107 ++ .../index/engine/ProtobufSegmentsStats.java | 285 ++++++ .../fielddata/ProtobufFieldDataStats.java | 120 +++ .../index/flush/ProtobufFlushStats.java | 123 +++ .../index/get/ProtobufGetStats.java | 151 +++ .../index/merge/ProtobufMergeStats.java | 262 +++++ .../index/recovery/ProtobufRecoveryStats.java | 137 +++ .../index/refresh/ProtobufRefreshStats.java | 161 ++++ .../search/stats/ProtobufSearchStats.java | 444 +++++++++ .../index/shard/ProtobufDocsStats.java | 110 +++ .../index/shard/ProtobufIndexingStats.java | 286 ++++++ .../index/store/ProtobufStoreStats.java | 118 +++ .../index/translog/ProtobufTranslogStats.java | 137 +++ .../index/warmer/ProtobufWarmerStats.java | 118 +++ .../indices/ProtobufNodeIndicesStats.java | 165 ++++ .../ProtobufAllCircuitBreakerStats.java | 78 ++ .../breaker/ProtobufCircuitBreakerStats.java | 124 +++ .../ingest/ProtobufIngestStats.java | 351 +++++++ .../opensearch/monitor/fs/ProtobufFsInfo.java | 554 +++++++++++ .../monitor/jvm/ProtobufJvmInfo.java | 18 +- .../monitor/jvm/ProtobufJvmStats.java | 755 +++++++++++++++ .../opensearch/monitor/os/ProtobufOsInfo.java | 20 +- .../monitor/os/ProtobufOsStats.java | 610 ++++++++++++ .../monitor/process/ProtobufProcessStats.java | 193 ++++ .../node/ProtobufAdaptiveSelectionStats.java | 121 +++ .../node/ProtobufNodeClosedException.java | 31 + .../node/ResponseCollectorService.java | 130 ++- .../plugins/ProtobufPluginInfo.java | 8 +- .../rest/ClientAgnosticRestHandler.java | 27 + .../rest/ProtobufBaseRestHandler.java | 309 ++++++ .../opensearch/rest/ProtobufRestHandler.java | 259 +++++ .../java/org/opensearch/rest/RestHandler.java | 2 +- .../action/cat/ProtobufAbstractCatAction.java | 78 ++ .../action/cat/ProtobufRestNodesAction.java | 545 +++++++++++ .../rest/action/cat/RestNodesAction.java | 36 +- .../script/ProtobufScriptCacheStats.java | 152 +++ .../script/ProtobufScriptContextStats.java | 91 ++ .../script/ProtobufScriptStats.java | 127 +++ .../completion/ProtobufCompletionStats.java | 96 ++ .../ProtobufTaskCancellationService.java | 118 +-- .../opensearch/tasks/ProtobufTaskInfo.java | 14 +- .../tasks/ProtobufTaskListener.java | 40 + .../opensearch/tasks/ProtobufTaskManager.java | 31 +- .../ProtobufTaskResourceTrackingService.java | 3 +- .../opensearch/tasks/ProtobufTaskResult.java | 6 +- .../threadpool/ProtobufThreadPool.java | 12 +- .../threadpool/ProtobufThreadPoolInfo.java | 8 +- .../threadpool/ProtobufThreadPoolStats.java | 191 ++++ .../ProtobufActionTransportException.java | 88 ++ .../ProtobufBytesTransportRequest.java | 66 ++ .../ProtobufClusterConnectionManager.java | 290 ++++++ .../ProtobufConnectTransportException.java | 61 ++ .../transport/ProtobufConnectionManager.java | 114 +++ .../transport/ProtobufConnectionProfile.java | 362 +++++++ ...ProtobufEmptyTransportResponseHandler.java | 46 + .../transport/ProtobufNetworkMessage.java | 2 +- .../ProtobufNodeDisconnectedException.java | 37 + .../ProtobufNodeNotConnectedException.java | 30 + .../transport/ProtobufOutboundHandler.java | 281 ++++++ .../transport/ProtobufOutboundMessage.java | 80 +- .../ProtobufPlainTransportFuture.java | 103 ++ .../ProtobufProxyConnectionStrategy.java | 414 ++++++++ ...tobufReceiveTimeoutTransportException.java | 31 + 
.../ProtobufRemoteClusterAwareClient.java | 78 ++ .../ProtobufRemoteClusterAwareRequest.java | 29 + .../ProtobufRemoteClusterConnection.java | 229 +++++ .../ProtobufRemoteClusterService.java | 454 +++++++++ .../ProtobufRemoteConnectionInfo.java | 132 +++ .../ProtobufRemoteConnectionManager.java | 217 +++++ .../ProtobufRemoteConnectionStrategy.java | 484 ++++++++++ .../ProtobufRemoteTransportException.java | 42 + .../ProtobufRequestHandlerRegistry.java | 25 +- ...ProtobufSendRequestTransportException.java | 31 + .../ProtobufSniffConnectionStrategy.java | 628 ++++++++++++ .../ProtobufTcpTransportChannel.java | 10 +- .../transport/ProtobufTransport.java | 65 +- .../ProtobufTransportActionProxy.java | 226 +++++ .../ProtobufTransportConnectionListener.java | 43 + .../transport/ProtobufTransportException.java | 37 + .../ProtobufTransportInterceptor.java | 59 ++ .../ProtobufTransportRequestHandler.java | 3 +- .../ProtobufTransportResponseHandler.java | 57 ++ .../transport/ProtobufTransportService.java | 512 +++++----- .../transport/ProtobufTransportStats.java | 126 +++ .../org/opensearch/transport/TcpHeader.java | 19 + 146 files changed, 16824 insertions(+), 1479 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufChannelActionListener.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufNodesOperationRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufFilterClient.java create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufAbstractDiffable.java create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/ProtobufPendingClusterStateStats.java create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/ProtobufPublishClusterStateStats.java create mode 100644 server/src/main/java/org/opensearch/common/ProtobufFieldMemoryStats.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/ProtobufNamedWriteableRegistry.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/ProtobufNotSerializableExceptionWrapper.java create mode 100644 server/src/main/java/org/opensearch/discovery/ProtobufDiscoveryStats.java create mode 100644 server/src/main/java/org/opensearch/http/ProtobufHttpStats.java create mode 100644 server/src/main/java/org/opensearch/index/cache/query/ProtobufQueryCacheStats.java create mode 100644 server/src/main/java/org/opensearch/index/cache/request/ProtobufRequestCacheStats.java create mode 100644 server/src/main/java/org/opensearch/index/engine/ProtobufSegmentsStats.java create mode 
100644 server/src/main/java/org/opensearch/index/fielddata/ProtobufFieldDataStats.java create mode 100644 server/src/main/java/org/opensearch/index/flush/ProtobufFlushStats.java create mode 100644 server/src/main/java/org/opensearch/index/get/ProtobufGetStats.java create mode 100644 server/src/main/java/org/opensearch/index/merge/ProtobufMergeStats.java create mode 100644 server/src/main/java/org/opensearch/index/recovery/ProtobufRecoveryStats.java create mode 100644 server/src/main/java/org/opensearch/index/refresh/ProtobufRefreshStats.java create mode 100644 server/src/main/java/org/opensearch/index/search/stats/ProtobufSearchStats.java create mode 100644 server/src/main/java/org/opensearch/index/shard/ProtobufDocsStats.java create mode 100644 server/src/main/java/org/opensearch/index/shard/ProtobufIndexingStats.java create mode 100644 server/src/main/java/org/opensearch/index/store/ProtobufStoreStats.java create mode 100644 server/src/main/java/org/opensearch/index/translog/ProtobufTranslogStats.java create mode 100644 server/src/main/java/org/opensearch/index/warmer/ProtobufWarmerStats.java create mode 100644 server/src/main/java/org/opensearch/indices/ProtobufNodeIndicesStats.java create mode 100644 server/src/main/java/org/opensearch/indices/breaker/ProtobufAllCircuitBreakerStats.java create mode 100644 server/src/main/java/org/opensearch/indices/breaker/ProtobufCircuitBreakerStats.java create mode 100644 server/src/main/java/org/opensearch/ingest/ProtobufIngestStats.java create mode 100644 server/src/main/java/org/opensearch/monitor/fs/ProtobufFsInfo.java create mode 100644 server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmStats.java create mode 100644 server/src/main/java/org/opensearch/monitor/os/ProtobufOsStats.java create mode 100644 server/src/main/java/org/opensearch/monitor/process/ProtobufProcessStats.java create mode 100644 server/src/main/java/org/opensearch/node/ProtobufAdaptiveSelectionStats.java create mode 100644 server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java create mode 100644 server/src/main/java/org/opensearch/rest/ClientAgnosticRestHandler.java create mode 100644 server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java create mode 100644 server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java create mode 100644 server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java create mode 100644 server/src/main/java/org/opensearch/script/ProtobufScriptCacheStats.java create mode 100644 server/src/main/java/org/opensearch/script/ProtobufScriptContextStats.java create mode 100644 server/src/main/java/org/opensearch/script/ProtobufScriptStats.java create mode 100644 server/src/main/java/org/opensearch/search/suggest/completion/ProtobufCompletionStats.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java create mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolStats.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufActionTransportException.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufBytesTransportRequest.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java create mode 100644 
server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufPlainTransportFuture.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionInfo.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteTransportException.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportException.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportResponseHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransportStats.java diff --git a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java index ae4f4c65b28d2..466d550938505 100644 --- a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java +++ b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java @@ -32,6 +32,8 @@ package org.opensearch.core.rest; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -533,6 +535,14 @@ public static void writeTo(StreamOutput out, RestStatus status) throws IOExcepti out.writeString(status.name()); } + public static RestStatus readFromProtobuf(CodedInputStream in) throws IOException { + return RestStatus.valueOf(in.readString()); + } + + public static void writeToProtobuf(CodedOutputStream out, RestStatus status) throws IOException { + 
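+        // writes the enum name as a length-delimited string without a field tag; this mirrors
+        // readFromProtobuf above, which reads it back with readString() and RestStatus.valueOf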
out.writeStringNoTag(status.name()); + } + public static RestStatus status(int successfulShards, int totalShards, ShardOperationFailedException... failures) { if (failures.length == 0) { if (successfulShards == 0 && totalShards > 0) { diff --git a/server/src/main/java/org/opensearch/ProtobufOpenSearchException.java b/server/src/main/java/org/opensearch/ProtobufOpenSearchException.java index 30c9e8be2cfb6..572ae2efc6d47 100644 --- a/server/src/main/java/org/opensearch/ProtobufOpenSearchException.java +++ b/server/src/main/java/org/opensearch/ProtobufOpenSearchException.java @@ -10,36 +10,926 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.CheckedFunction; +import org.opensearch.common.Nullable; +import org.opensearch.core.ParseField; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.logging.LoggerMessageFormat; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParseException; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.Index; +import org.opensearch.index.shard.ShardId; +import org.opensearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; +import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.common.xcontent.XContentParserUtils.ensureFieldName; /** - * Base exception for a failed node + * A base class for all opensearch exceptions. * * @opensearch.internal */ -public class ProtobufOpenSearchException extends RuntimeException implements ProtobufWriteable { +public class ProtobufOpenSearchException extends RuntimeException implements ToXContentFragment, ProtobufWriteable { + + private static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0); + + /** + * Setting a higher base exception id to avoid conflicts. + */ + private static final int CUSTOM_ELASTICSEARCH_EXCEPTIONS_BASE_ID = 10000; + + /** + * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)} + * to control if the {@code caused_by} element should render. Unlike most parameters to {@code toXContent} methods this parameter is + * internal only and not available as a URL parameter. + */ + private static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; + /** + * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)} + * to control if the {@code stack_trace} element should render. Unlike most parameters to {@code toXContent} methods this parameter is + * internal only and not available as a URL parameter. Use the {@code error_trace} parameter instead. 
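+     * For example, a renderer that wants stack traces included could pass something like
+     * {@code new ToXContent.MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"))}
+     * (an illustrative call, not part of this patch).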
+     */
+    public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
+    public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
+    private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
+    private static final String INDEX_METADATA_KEY = "opensearch.index";
+    private static final String INDEX_METADATA_KEY_UUID = "opensearch.index_uuid";
+    private static final String SHARD_METADATA_KEY = "opensearch.shard";
+    private static final String RESOURCE_METADATA_TYPE_KEY = "opensearch.resource.type";
+    private static final String RESOURCE_METADATA_ID_KEY = "opensearch.resource.id";
+
+    private static final String TYPE = "type";
+    private static final String REASON = "reason";
+    private static final String CAUSED_BY = "caused_by";
+    private static final ParseField SUPPRESSED = new ParseField("suppressed");
+    public static final String STACK_TRACE = "stack_trace";
+    private static final String HEADER = "header";
+    private static final String ERROR = "error";
+    private static final String ROOT_CAUSE = "root_cause";
+
+    private static final Map<Integer, CheckedFunction<CodedInputStream, ? extends ProtobufOpenSearchException, IOException>> ID_TO_SUPPLIER;
+    private static final Map<Class<? extends ProtobufOpenSearchException>, OpenSearchExceptionHandle> CLASS_TO_OPENSEARCH_EXCEPTION_HANDLE;
+
+    private static final Pattern OS_METADATA = Pattern.compile("^opensearch\\.");
+
+    private final Map<String, List<String>> metadata = new HashMap<>();
+    private final Map<String, List<String>> headers = new HashMap<>();
-    private String message;
+    /**
+     * Construct a ProtobufOpenSearchException with the specified cause exception.
+     */
+    public ProtobufOpenSearchException(Throwable cause) {
+        super(cause);
+    }
+
+    /**
+     * Construct a ProtobufOpenSearchException with the specified detail message.
+     *
+     * The message can be parameterized using {} as placeholders for the given
+     * arguments
+     *
+     * @param msg the detail message
+     * @param args the arguments for the message
+     */
+    public ProtobufOpenSearchException(String msg, Object... args) {
+        super(LoggerMessageFormat.format(msg, args));
+    }
-    public ProtobufOpenSearchException(String message) {
-        super(message);
-        this.message = message;
+    /**
+     * Construct a ProtobufOpenSearchException with the specified detail message
+     * and nested exception.
+     *
+     * The message can be parameterized using {} as placeholders for the given
+     * arguments
+     *
+     * @param msg the detail message
+     * @param cause the nested exception
+     * @param args the arguments for the message
+     */
+    public ProtobufOpenSearchException(String msg, Throwable cause, Object... args) {
+        super(LoggerMessageFormat.format(msg, args), cause);
     }
 
     public ProtobufOpenSearchException(CodedInputStream in) throws IOException {
-        super(in.readString());
-        this.message = in.readString();
+        super(new ProtobufStreamInput(in).readOptionalString(), new ProtobufStreamInput(in).readException());
+        readStackTrace(this, in);
+        // headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
+        // metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
+    }
+
+    /**
+     * Adds a new piece of metadata with the given key.
+     * If the provided key is already present, the corresponding metadata will be replaced
+     */
+    public void addMetadata(String key, String... values) {
+        addMetadata(key, Arrays.asList(values));
+    }
+
+    /**
+     * Adds a new piece of metadata with the given key.
+ * If the provided key is already present, the corresponding metadata will be replaced + */ + public void addMetadata(String key, List values) { + // we need to enforce this otherwise bw comp doesn't work properly, as "opensearch." + // was the previous criteria to split headers in two sets + if (key.startsWith("opensearch.") == false) { + throw new IllegalArgumentException("exception metadata must start with [opensearch.], found [" + key + "] instead"); + } + this.metadata.put(key, values); + } + + /** + * Returns a set of all metadata keys on this exception + */ + public Set getMetadataKeys() { + return metadata.keySet(); + } + + /** + * Returns the list of metadata values for the given key or {@code null} if no metadata for the + * given key exists. + */ + public List getMetadata(String key) { + return metadata.get(key); + } + + protected Map> getMetadata() { + return metadata; + } + + /** + * Adds a new header with the given key. + * This method will replace existing header if a header with the same key already exists + */ + public void addHeader(String key, List value) { + // we need to enforce this otherwise bw comp doesn't work properly, as "opensearch." + // was the previous criteria to split headers in two sets + if (key.startsWith("opensearch.")) { + throw new IllegalArgumentException("exception headers must not start with [opensearch.], found [" + key + "] instead"); + } + this.headers.put(key, value); + } + + /** + * Adds a new header with the given key. + * This method will replace existing header if a header with the same key already exists + */ + public void addHeader(String key, String... value) { + addHeader(key, Arrays.asList(value)); + } + + /** + * Returns a set of all header keys on this exception + */ + public Set getHeaderKeys() { + return headers.keySet(); + } + + /** + * Returns the list of header values for the given key or {@code null} if no header for the + * given key exists. + */ + public List getHeader(String key) { + return headers.get(key); + } + + protected Map> getHeaders() { + return headers; + } + + /** + * Returns the rest status code associated with this exception. + */ + public RestStatus status() { + Throwable cause = unwrapCause(); + if (cause == this) { + return RestStatus.INTERNAL_SERVER_ERROR; + } else { + return ExceptionsHelper.status(cause); + } + } + + /** + * Unwraps the actual cause from the exception for cases when the exception is a + * {@link OpenSearchWrapperException}. + * + * @see ExceptionsHelper#unwrapCause(Throwable) + */ + public Throwable unwrapCause() { + return ExceptionsHelper.unwrapCause(this); + } + + /** + * Return the detail message, including the message from the nested exception + * if there is one. + */ + public String getDetailedMessage() { + if (getCause() != null) { + StringBuilder sb = new StringBuilder(); + sb.append(toString()).append("; "); + if (getCause() instanceof ProtobufOpenSearchException) { + sb.append(((ProtobufOpenSearchException) getCause()).getDetailedMessage()); + } else { + sb.append(getCause()); + } + return sb.toString(); + } else { + return super.toString(); + } + } + + /** + * Retrieve the innermost cause of this exception, if none, returns the current exception. 
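+     * The walk stops as soon as an exception reports itself as its own cause, so it terminates
+     * even on such malformed cause chains.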
+ */ + public Throwable getRootCause() { + Throwable rootCause = this; + Throwable cause = getCause(); + while (cause != null && cause != rootCause) { + rootCause = cause; + cause = cause.getCause(); + } + return rootCause; } @Override public void writeTo(CodedOutputStream out) throws IOException { - out.writeStringNoTag(this.getMessage()); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeOptionalString(this.getMessage()); + protobufStreamOutput.writeException(this.getCause()); + writeStackTraces(this, out, protobufStreamOutput); + // out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); + // out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); + } + + public static ProtobufOpenSearchException readException(CodedInputStream input, int id) throws IOException { + CheckedFunction opensearchException = ID_TO_SUPPLIER.get(id); + if (opensearchException == null) { + throw new IllegalStateException("unknown exception for id: " + id); + } + return opensearchException.apply(input); + } + + /** + * Returns true iff the given class is a registered for an exception to be read. + */ + public static boolean isRegistered(Class exception, Version version) { + OpenSearchExceptionHandle openSearchExceptionHandle = CLASS_TO_OPENSEARCH_EXCEPTION_HANDLE.get(exception); + if (openSearchExceptionHandle != null) { + return version.onOrAfter(openSearchExceptionHandle.versionAdded); + } + return false; + } + + static Set> getRegisteredKeys() { // for testing + return CLASS_TO_OPENSEARCH_EXCEPTION_HANDLE.keySet(); + } + + /** + * Returns the serialization id the given exception. + */ + public static int getId(Class exception) { + return CLASS_TO_OPENSEARCH_EXCEPTION_HANDLE.get(exception).id; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Throwable ex = ExceptionsHelper.unwrapCause(this); + if (ex != this) { + generateThrowableXContent(builder, params, this); + } else { + innerToXContent(builder, params, this, getExceptionName(), getMessage(), headers, metadata, getCause()); + } + return builder; + } + + protected static void innerToXContent( + XContentBuilder builder, + Params params, + Throwable throwable, + String type, + String message, + Map> headers, + Map> metadata, + Throwable cause + ) throws IOException { + builder.field(TYPE, type); + builder.field(REASON, message); + + for (Map.Entry> entry : metadata.entrySet()) { + headerToXContent(builder, entry.getKey().substring("opensearch.".length()), entry.getValue()); + } + + if (throwable instanceof ProtobufOpenSearchException) { + ProtobufOpenSearchException exception = (ProtobufOpenSearchException) throwable; + exception.metadataToXContent(builder, params); + } + + if (params.paramAsBoolean(REST_EXCEPTION_SKIP_CAUSE, REST_EXCEPTION_SKIP_CAUSE_DEFAULT) == false) { + if (cause != null) { + builder.field(CAUSED_BY); + builder.startObject(); + generateThrowableXContent(builder, params, cause); + builder.endObject(); + } + } + + if (headers.isEmpty() == false) { + builder.startObject(HEADER); + for (Map.Entry> entry : headers.entrySet()) { + headerToXContent(builder, entry.getKey(), entry.getValue()); + } + builder.endObject(); + } + + if (params.paramAsBoolean(REST_EXCEPTION_SKIP_STACK_TRACE, REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT) == false) { + builder.field(STACK_TRACE, ExceptionsHelper.stackTrace(throwable)); + } + + Throwable[] allSuppressed = throwable.getSuppressed(); + if 
(allSuppressed.length > 0) { + builder.startArray(SUPPRESSED.getPreferredName()); + for (Throwable suppressed : allSuppressed) { + builder.startObject(); + generateThrowableXContent(builder, params, suppressed); + builder.endObject(); + } + builder.endArray(); + } + } + + private static void headerToXContent(XContentBuilder builder, String key, List values) throws IOException { + if (values != null && values.isEmpty() == false) { + if (values.size() == 1) { + builder.field(key, values.get(0)); + } else { + builder.startArray(key); + for (String value : values) { + builder.value(value); + } + builder.endArray(); + } + } + } + + /** + * Renders additional per-exception information into the XContent + */ + protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException {} + + /** + * Generate a {@link ProtobufOpenSearchException} from a {@link XContentParser}. This does not + * return the original exception type (e.g. NodeClosedException) but just wraps + * the type, the reason and the cause of the exception. It also recursively parses the + * tree structure of the cause, returning it as a tree structure of {@link ProtobufOpenSearchException} + * instances. + */ + public static ProtobufOpenSearchException fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.nextToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + return innerFromXContent(parser, false); + } + + public static ProtobufOpenSearchException innerFromXContent(XContentParser parser, boolean parseRootCauses) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + + String type = null, reason = null, stack = null; + ProtobufOpenSearchException cause = null; + Map> metadata = new HashMap<>(); + Map> headers = new HashMap<>(); + List rootCauses = new ArrayList<>(); + List suppressed = new ArrayList<>(); + + for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) { + String currentFieldName = parser.currentName(); + token = parser.nextToken(); + + if (token.isValue()) { + if (TYPE.equals(currentFieldName)) { + type = parser.text(); + } else if (REASON.equals(currentFieldName)) { + reason = parser.text(); + } else if (STACK_TRACE.equals(currentFieldName)) { + stack = parser.text(); + } else if (token == XContentParser.Token.VALUE_STRING) { + metadata.put(currentFieldName, Collections.singletonList(parser.text())); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (CAUSED_BY.equals(currentFieldName)) { + cause = fromXContent(parser); + } else if (HEADER.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + List values = headers.getOrDefault(currentFieldName, new ArrayList<>()); + if (token == XContentParser.Token.VALUE_STRING) { + values.add(parser.text()); + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + values.add(parser.text()); + } else { + parser.skipChildren(); + } + } + } else if (token == XContentParser.Token.START_OBJECT) { + parser.skipChildren(); + } + headers.put(currentFieldName, values); + } + } + } else { + // Any additional metadata object added by the metadataToXContent method is ignored + // and
skipped, so that the parser does not fail on unknown fields. The parser only + // supports metadata key-value pairs and metadata arrays of values. + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (parseRootCauses && ROOT_CAUSE.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + rootCauses.add(fromXContent(parser)); + } + } else if (SUPPRESSED.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + suppressed.add(fromXContent(parser)); + } + } else { + // Parse the array and add each item to the corresponding list of metadata. + // Arrays of objects are not supported yet and are just ignored and skipped. + List values = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + values.add(parser.text()); + } else { + parser.skipChildren(); + } + } + if (values.size() > 0) { + if (metadata.containsKey(currentFieldName)) { + values.addAll(metadata.get(currentFieldName)); + } + metadata.put(currentFieldName, values); + } + } + } + } + + ProtobufOpenSearchException e = new ProtobufOpenSearchException(buildMessage(type, reason, stack), cause); + for (Map.Entry> entry : metadata.entrySet()) { + // subclasses can print out additional metadata through the metadataToXContent method. Simple key-value pairs will be + // parsed back and become part of this metadata set, while objects and arrays are not supported when parsing back. + // Those key-value pairs become part of the metadata set and inherit the "opensearch." prefix as that is currently required + // by addMetadata. The prefix will get stripped out when printing metadata out so it will be effectively invisible. + // TODO move subclasses that print out simple metadata to using addMetadata directly and support also numbers and booleans. + // TODO rename metadataToXContent and have only SearchPhaseExecutionException use it, which prints out complex objects + e.addMetadata("opensearch." + entry.getKey(), entry.getValue()); + } + for (Map.Entry> header : headers.entrySet()) { + e.addHeader(header.getKey(), header.getValue()); + } + + // Adds root causes as suppressed exceptions. This way they are not lost + // after parsing and can be retrieved using getSuppressed() method. + for (ProtobufOpenSearchException rootCause : rootCauses) { + e.addSuppressed(rootCause); + } + for (ProtobufOpenSearchException s : suppressed) { + e.addSuppressed(s); + } + return e; + } + + /** + * Static toXContent helper method that renders {@link ProtobufOpenSearchException} or {@link Throwable} instances + * as XContent, delegating the rendering to {@link #toXContent(XContentBuilder, Params)} + * or {@link #innerToXContent(XContentBuilder, Params, Throwable, String, String, Map, Map, Throwable)}. + * + * This method is usually used when the {@link Throwable} is rendered as a part of another XContent object, and its result can + * be parsed back using the {@link #fromXContent(XContentParser)} method.
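+ *
+ * <p>A minimal usage sketch (the surrounding builder setup is illustrative, not part of this class):
+ * <pre>
+ * XContentBuilder builder = XContentFactory.jsonBuilder();
+ * builder.startObject();
+ * ProtobufOpenSearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, throwable);
+ * builder.endObject();
+ * </pre>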
+ */ + public static void generateThrowableXContent(XContentBuilder builder, Params params, Throwable t) throws IOException { + t = ExceptionsHelper.unwrapCause(t); + + if (t instanceof ProtobufOpenSearchException) { + ((ProtobufOpenSearchException) t).toXContent(builder, params); + } else { + innerToXContent(builder, params, t, getExceptionName(t), t.getMessage(), emptyMap(), emptyMap(), t.getCause()); + } + } + + /** + * Render any exception as an xcontent, encapsulated within a field or object named "error". The level of details that are rendered + * depends on the value of the "detailed" parameter: when it's false only a simple message based on the type and message of the + * exception is rendered. When it's true all details are provided, including guessed root causes, the cause and potentially the stack + * trace. + * + * This method is usually used when the {@link Exception} is rendered as a full XContent object, and its output can be parsed + * by the {@link #failureFromXContent(XContentParser)} method. + */ + public static void generateFailureXContent(XContentBuilder builder, Params params, @Nullable Exception e, boolean detailed) + throws IOException { + // No exception to render as an error + if (e == null) { + builder.field(ERROR, "unknown"); + return; + } + + // Render the exception with a simple message + if (detailed == false) { + Throwable t = e; + for (int counter = 0; counter < 10 && t != null; counter++) { + if (t instanceof ProtobufOpenSearchException) { + break; + } + t = t.getCause(); + } + builder.field(ERROR, ExceptionsHelper.summaryMessage(t != null ? t : e)); + return; + } + + // Render the exception with all details + final ProtobufOpenSearchException[] rootCauses = ProtobufOpenSearchException.guessRootCauses(e); + builder.startObject(ERROR); + { + builder.startArray(ROOT_CAUSE); + for (ProtobufOpenSearchException rootCause : rootCauses) { + builder.startObject(); + rootCause.toXContent(builder, new DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_CAUSE, "true"), params)); + builder.endObject(); + } + builder.endArray(); + } + generateThrowableXContent(builder, params, e); + builder.endObject(); + } + + /** + * Parses the output of {@link #generateFailureXContent(XContentBuilder, Params, Exception, boolean)} + */ + public static ProtobufOpenSearchException failureFromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureFieldName(parser, token, ERROR); + + token = parser.nextToken(); + if (token.isValue()) { + return new ProtobufOpenSearchException(buildMessage("exception", parser.text(), null)); + } + + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + token = parser.nextToken(); + + // Root causes are parsed in the innerFromXContent() and are added as suppressed exceptions. + return innerFromXContent(parser, true); + } + + /** + * Returns the root cause of this exception or multiple if different shards caused different exceptions + */ + public ProtobufOpenSearchException[] guessRootCauses() { + final Throwable cause = getCause(); + if (cause != null && cause instanceof ProtobufOpenSearchException) { + return ((ProtobufOpenSearchException) cause).guessRootCauses(); + } + return new ProtobufOpenSearchException[] { this }; + } + + /** + * Returns the root cause of this exception or multiple if different shards caused different exceptions. + * If the given exception is not an instance of {@link ProtobufOpenSearchException}, it is wrapped in a generic + * ProtobufOpenSearchException and returned as a single-element array.
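+ *
+ * <p>For example (the exception instance is illustrative):
+ * <pre>
+ * ProtobufOpenSearchException[] rootCauses =
+ *     ProtobufOpenSearchException.guessRootCauses(new IllegalArgumentException("bad request"));
+ * // rootCauses has length 1 and wraps the IllegalArgumentException
+ * </pre>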
+ */ + public static ProtobufOpenSearchException[] guessRootCauses(Throwable t) { + Throwable ex = ExceptionsHelper.unwrapCause(t); + if (ex instanceof ProtobufOpenSearchException) { + // ProtobufOpenSearchException knows how to guess its own root cause + return ((ProtobufOpenSearchException) ex).guessRootCauses(); + } + if (ex instanceof XContentParseException) { + /* + * We'd like to unwrap parsing exceptions to the inner-most + * parsing exception because that is generally the most interesting + * exception to return to the user. If that exception is caused by + * a ProtobufOpenSearchException we'd like to keep unwrapping because + * ProtobufOpenSearchException instances tend to contain useful information + * for the user. + */ + Throwable cause = ex.getCause(); + if (cause != null) { + if (cause instanceof XContentParseException || cause instanceof ProtobufOpenSearchException) { + return guessRootCauses(ex.getCause()); + } + } + } + return new ProtobufOpenSearchException[] { new ProtobufOpenSearchException(ex.getMessage(), ex) { + @Override + protected String getExceptionName() { + return getExceptionName(getCause()); + } + } }; + } + + protected String getExceptionName() { + return getExceptionName(this); + } + + /** + * Returns an underscore case name for the given exception. This method strips {@code OpenSearch} prefixes from exception names. + */ + public static String getExceptionName(Throwable ex) { + String simpleName = ex.getClass().getSimpleName(); + if (simpleName.startsWith("OpenSearch")) { + simpleName = simpleName.substring("OpenSearch".length()); + } + // TODO: do we really need to make the exception name in underscore casing? + return toUnderscoreCase(simpleName); + } + + static String buildMessage(String type, String reason, String stack) { + StringBuilder message = new StringBuilder("OpenSearch exception ["); + message.append(TYPE).append('=').append(type).append(", "); + message.append(REASON).append('=').append(reason); + if (stack != null) { + message.append(", ").append(STACK_TRACE).append('=').append(stack); + } + message.append(']'); + return message.toString(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + if (metadata.containsKey(INDEX_METADATA_KEY)) { + builder.append(getIndex()); + if (metadata.containsKey(SHARD_METADATA_KEY)) { + builder.append('[').append(getShardId()).append(']'); + } + builder.append(' '); + } + return builder.append(ExceptionsHelper.detailedMessage(this).trim()).toString(); + } + + /** + * Deserializes stacktrace elements as well as suppressed exceptions from the given input stream and + * adds them to the given exception. + */ + public static T readStackTrace(T throwable, CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + throwable.setStackTrace(protobufStreamInput.readArray(i -> { + final String declaringClass = i.readString(); + final String fileName = protobufStreamInput.readOptionalString(); + final String methodName = i.readString(); + final int lineNumber = i.readInt32(); + return new StackTraceElement(declaringClass, methodName, fileName, lineNumber); + }, StackTraceElement[]::new)); + + int numSuppressed = protobufStreamInput.readVInt(); + for (int i = 0; i < numSuppressed; i++) { + throwable.addSuppressed(protobufStreamInput.readException()); + } + return throwable; + } + + /** + * Serializes the given exception's stacktrace elements as well as its suppressed exceptions to the given output stream.
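+ *
+ * <p>Sketch of typical use (the stream setup is illustrative):
+ * <pre>
+ * ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+ * CodedOutputStream out = CodedOutputStream.newInstance(bytes);
+ * writeStackTraces(exception, out, new ProtobufStreamOutput(out));
+ * out.flush();
+ * </pre>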
+ */ + public static T writeStackTraces(T throwable, CodedOutputStream out, ProtobufStreamOutput protobufStreamOutput) + throws IOException { + protobufStreamOutput.writeArray((o, v) -> { + o.writeStringNoTag(v.getClassName()); + protobufStreamOutput.writeOptionalString(v.getFileName()); + o.writeStringNoTag(v.getMethodName()); + o.writeInt32NoTag(v.getLineNumber()); + }, throwable.getStackTrace()); + protobufStreamOutput.writeArray((o, v) -> { o.writeStringNoTag(v.toString()); }, throwable.getSuppressed()); + return throwable; + } + + /** + * This is the list of Exceptions OpenSearch can throw over the wire or save into a corruption marker. Each value in the enum is a + * single exception tying the Class to an id for use on the encode side and the id back to a constructor for use on the decode side. As + * such it's ok if the exceptions change names so long as their constructor can still read the exception. Each exception is listed + * in id order below. If you want to remove an exception leave a tombstone comment and mark the id as null in + * ExceptionSerializationTests.testIds.ids. + */ + private enum OpenSearchExceptionHandle { + NODE_CLOSED_EXCEPTION( + org.opensearch.node.ProtobufNodeClosedException.class, + org.opensearch.node.ProtobufNodeClosedException::new, + 9, + UNKNOWN_VERSION_ADDED + ), + CONNECT_TRANSPORT_EXCEPTION( + org.opensearch.transport.ProtobufConnectTransportException.class, + org.opensearch.transport.ProtobufConnectTransportException::new, + 12, + UNKNOWN_VERSION_ADDED + ), + ACTION_TRANSPORT_EXCEPTION( + org.opensearch.transport.ProtobufActionTransportException.class, + org.opensearch.transport.ProtobufActionTransportException::new, + 20, + UNKNOWN_VERSION_ADDED + ), + TRANSPORT_EXCEPTION( + org.opensearch.transport.ProtobufTransportException.class, + org.opensearch.transport.ProtobufTransportException::new, + 34, + UNKNOWN_VERSION_ADDED + ), + FAILED_NODE_EXCEPTION( + org.opensearch.action.ProtobufFailedNodeException.class, + org.opensearch.action.ProtobufFailedNodeException::new, + 71, + UNKNOWN_VERSION_ADDED + ), + RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION( + org.opensearch.transport.ProtobufReceiveTimeoutTransportException.class, + org.opensearch.transport.ProtobufReceiveTimeoutTransportException::new, + 83, + UNKNOWN_VERSION_ADDED + ), + NODE_DISCONNECTED_EXCEPTION( + org.opensearch.transport.ProtobufNodeDisconnectedException.class, + org.opensearch.transport.ProtobufNodeDisconnectedException::new, + 84, + UNKNOWN_VERSION_ADDED + ), + REMOTE_TRANSPORT_EXCEPTION( + org.opensearch.transport.ProtobufRemoteTransportException.class, + org.opensearch.transport.ProtobufRemoteTransportException::new, + 103, + UNKNOWN_VERSION_ADDED + ), + NODE_NOT_CONNECTED_EXCEPTION( + org.opensearch.transport.ProtobufNodeNotConnectedException.class, + org.opensearch.transport.ProtobufNodeNotConnectedException::new, + 134, + UNKNOWN_VERSION_ADDED + ); + + final Class exceptionClass; + final CheckedFunction constructor; + final int id; + final Version versionAdded; + + OpenSearchExceptionHandle( + Class exceptionClass, + CheckedFunction constructor, + int id, + Version versionAdded + ) { + // We need the exceptionClass because you can't dig it out of the constructor reliably. + this.exceptionClass = exceptionClass; + this.constructor = constructor; + this.versionAdded = versionAdded; + this.id = id; + } + } + + /** + * Returns an array of all registered handle IDs. These are the IDs for every registered + * exception.
+ * + * @return an array of all registered handle IDs + */ + static int[] ids() { + return Arrays.stream(OpenSearchExceptionHandle.values()).mapToInt(h -> h.id).toArray(); + } + + /** + * Returns an array of all registered pairs of handle IDs and exception classes. These pairs are + * provided for every registered exception. + * + * @return an array of all registered pairs of handle IDs and exception classes + */ + static Tuple>[] classes() { + @SuppressWarnings("unchecked") + final Tuple>[] ts = Arrays.stream(OpenSearchExceptionHandle.values()) + .map(h -> Tuple.tuple(h.id, h.exceptionClass)) + .toArray(Tuple[]::new); + return ts; + } + + static { + ID_TO_SUPPLIER = unmodifiableMap( + Arrays.stream(OpenSearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor)) + ); + CLASS_TO_OPENSEARCH_EXCEPTION_HANDLE = unmodifiableMap( + Arrays.stream(OpenSearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e)) + ); + } + + public Index getIndex() { + List index = getMetadata(INDEX_METADATA_KEY); + if (index != null && index.isEmpty() == false) { + List index_uuid = getMetadata(INDEX_METADATA_KEY_UUID); + return new Index(index.get(0), index_uuid.get(0)); + } + + return null; + } + + public ShardId getShardId() { + List shard = getMetadata(SHARD_METADATA_KEY); + if (shard != null && shard.isEmpty() == false) { + return new ShardId(getIndex(), Integer.parseInt(shard.get(0))); + } + return null; + } + + public void setIndex(Index index) { + if (index != null) { + addMetadata(INDEX_METADATA_KEY, index.getName()); + addMetadata(INDEX_METADATA_KEY_UUID, index.getUUID()); + } + } + + public void setIndex(String index) { + if (index != null) { + setIndex(new Index(index, INDEX_UUID_NA_VALUE)); + } + } + + public void setShard(ShardId shardId) { + if (shardId != null) { + setIndex(shardId.getIndex()); + addMetadata(SHARD_METADATA_KEY, Integer.toString(shardId.id())); + } + } + + public void setResources(String type, String... 
id) { + assert type != null; + addMetadata(RESOURCE_METADATA_ID_KEY, id); + addMetadata(RESOURCE_METADATA_TYPE_KEY, type); + } + + public List getResourceId() { + return getMetadata(RESOURCE_METADATA_ID_KEY); + } + + public String getResourceType() { + List header = getMetadata(RESOURCE_METADATA_TYPE_KEY); + if (header != null && header.isEmpty() == false) { + assert header.size() == 1; + return header.get(0); + } + return null; } - public String getMessage() { - return this.message; + // lower cases and adds underscores to transitions in a name + private static String toUnderscoreCase(String value) { + StringBuilder sb = new StringBuilder(); + boolean changed = false; + for (int i = 0; i < value.length(); i++) { + char c = value.charAt(i); + if (Character.isUpperCase(c)) { + if (!changed) { + // copy it over here + for (int j = 0; j < i; j++) { + sb.append(value.charAt(j)); + } + changed = true; + if (i == 0) { + sb.append(Character.toLowerCase(c)); + } else { + sb.append('_'); + sb.append(Character.toLowerCase(c)); + } + } else { + sb.append('_'); + sb.append(Character.toLowerCase(c)); + } + } else { + if (changed) { + sb.append(c); + } + } + } + if (!changed) { + return value; + } + return sb.toString(); } } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index b775095861150..0a6ba0dae57ca 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -274,6 +274,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.AutoCreateIndex; import org.opensearch.action.support.DestructiveOperations; +import org.opensearch.action.support.ProtobufTransportAction; import org.opensearch.action.support.TransportAction; import org.opensearch.action.termvectors.MultiTermVectorsAction; import org.opensearch.action.termvectors.TermVectorsAction; @@ -1169,4 +1170,72 @@ public RestSendToExtensionAction get(RestHandler.Route route) { return routeRegistry.get(route); } } + + /** + * The ProtobufDynamicActionRegistry maintains a registry mapping {@link ProtobufActionType} instances to {@link ProtobufTransportAction} instances. + *
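+ * <p>Sketch of dynamic registration and lookup (the transport action instance here is hypothetical):
+ * <pre>
+ * ProtobufDynamicActionRegistry registry = new ProtobufDynamicActionRegistry();
+ * registry.registerDynamicAction(ProtobufNodesInfoAction.INSTANCE, nodesInfoTransportAction);
+ * ProtobufTransportAction action = registry.get(ProtobufNodesInfoAction.INSTANCE);
+ * registry.unregisterDynamicAction(ProtobufNodesInfoAction.INSTANCE);
+ * </pre>
+ *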

+ * This class is modeled after {@link NamedRegistry} but provides both register and unregister capabilities. + * + * @opensearch.internal + */ + public static class ProtobufDynamicActionRegistry { + // This is the unmodifiable actions map created during node bootstrap, which + // will continue to link ActionType and TransportAction pairs from core and plugin + // action handler registration. + private Map actions = Collections.emptyMap(); + // A dynamic registry to add or remove ActionType / TransportAction pairs + // at times other than node bootstrap. + private final Map, ProtobufTransportAction> registry = new ConcurrentHashMap<>(); + + /** + * Register the immutable actions in the registry. + * + * @param actions The injected map of {@link ProtobufActionType} to {@link ProtobufTransportAction} + */ + public void registerUnmodifiableActionMap(Map actions) { + this.actions = actions; + } + + /** + * Add a dynamic action to the registry. + * + * @param action The action instance to add + * @param transportAction The corresponding instance of transportAction to execute + */ + public void registerDynamicAction(ProtobufActionType action, ProtobufTransportAction transportAction) { + requireNonNull(action, "action is required"); + requireNonNull(transportAction, "transportAction is required"); + if (actions.containsKey(action) || registry.putIfAbsent(action, transportAction) != null) { + throw new IllegalArgumentException("action [" + action.name() + "] already registered"); + } + } + + /** + * Remove a dynamic action from the registry. + * + * @param action The action to remove + */ + public void unregisterDynamicAction(ProtobufActionType action) { + requireNonNull(action, "action is required"); + if (registry.remove(action) == null) { + throw new IllegalArgumentException("action [" + action.name() + "] was not registered"); + } + } + + /** + * Gets the {@link ProtobufTransportAction} instance corresponding to the {@link ProtobufActionType} instance. + * + * @param action The {@link ProtobufActionType}. + * @return the corresponding {@link ProtobufTransportAction} if it is registered, null otherwise. + */ + @SuppressWarnings("unchecked") + public ProtobufTransportAction get( + ProtobufActionType action + ) { + if (actions.containsKey(action)) { + return actions.get(action); + } + return registry.get(action); + } + } } diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java new file mode 100644 index 0000000000000..4bae1f6c3bb04 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java @@ -0,0 +1,73 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.transport.ProtobufTransportResponseHandler; +import org.opensearch.transport.ProtobufTransportException; +import org.opensearch.transport.ProtobufTransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * A simple base class for action response listeners, defaulting to using the SAME executor (as it's +* very common on response handlers).
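+*
+* <p>For example (the listener and response reader are illustrative):
+* <pre>
+* new ProtobufActionListenerResponseHandler(listener, ProtobufNodesInfoResponse::new)
+* </pre>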
+* +* @opensearch.api +*/ +public class ProtobufActionListenerResponseHandler + implements + ProtobufTransportResponseHandler { + + private final ActionListener listener; + private final ProtobufWriteable.Reader reader; + private final String executor; + + public ProtobufActionListenerResponseHandler( + ActionListener listener, + ProtobufWriteable.Reader reader, + String executor + ) { + this.listener = Objects.requireNonNull(listener); + this.reader = Objects.requireNonNull(reader); + this.executor = Objects.requireNonNull(executor); + } + + public ProtobufActionListenerResponseHandler(ActionListener listener, ProtobufWriteable.Reader reader) { + this(listener, reader, ProtobufThreadPool.Names.SAME); + } + + @Override + public void handleResponse(Response response) { + listener.onResponse(response); + } + + @Override + public void handleException(ProtobufTransportException e) { + listener.onFailure(e); + } + + @Override + public String executor() { + return executor; + } + + @Override + public Response read(CodedInputStream in) throws IOException { + return reader.read(in); + } + + @Override + public String toString() { + return super.toString() + "/" + listener; + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java index 33c0e46f5fc92..6c752e24c4f2c 100644 --- a/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java @@ -8,7 +8,6 @@ package org.opensearch.action; -import org.opensearch.client.OpenSearchClient; import org.opensearch.client.ProtobufOpenSearchClient; import org.opensearch.common.unit.TimeValue; diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionType.java b/server/src/main/java/org/opensearch/action/ProtobufActionType.java index 911419fb67677..c8fc92a3e9f35 100644 --- a/server/src/main/java/org/opensearch/action/ProtobufActionType.java +++ b/server/src/main/java/org/opensearch/action/ProtobufActionType.java @@ -9,7 +9,6 @@ package org.opensearch.action; import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.transport.TransportRequestOptions; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java index 0d1b03ae29d07..df059711196cd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java @@ -66,7 +66,7 @@ public class ProtobufNodeInfo extends ProtobufBaseNodeResponse { public ProtobufNodeInfo(CodedInputStream in) throws IOException { super(in); - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); version = Version.readVersionProtobuf(in); build = Build.readBuildProtobuf(in); if (in.readBool()) { @@ -77,20 +77,17 @@ public ProtobufNodeInfo(CodedInputStream in) throws IOException { if (in.readBool()) { settings = Settings.readSettingsFromStreamProtobuf(in); } - addInfoIfNonNull(ProtobufOsInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufOsInfo::new, in)); - addInfoIfNonNull(ProtobufProcessInfo.class, 
protobufStreamInput.readOptionalWriteable(ProtobufProcessInfo::new, in)); - addInfoIfNonNull(ProtobufJvmInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufJvmInfo::new, in)); - addInfoIfNonNull(ProtobufThreadPoolInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufThreadPoolInfo::new, in)); - addInfoIfNonNull(ProtobufTransportInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufTransportInfo::new, in)); - addInfoIfNonNull(ProtobufHttpInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufHttpInfo::new, in)); - addInfoIfNonNull(ProtobufPluginsAndModules.class, protobufStreamInput.readOptionalWriteable(ProtobufPluginsAndModules::new, in)); - addInfoIfNonNull(ProtobufIngestInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufIngestInfo::new, in)); - addInfoIfNonNull(ProtobufAggregationInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufAggregationInfo::new, in)); + addInfoIfNonNull(ProtobufOsInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufOsInfo::new)); + addInfoIfNonNull(ProtobufProcessInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufProcessInfo::new)); + addInfoIfNonNull(ProtobufJvmInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufJvmInfo::new)); + addInfoIfNonNull(ProtobufThreadPoolInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufThreadPoolInfo::new)); + addInfoIfNonNull(ProtobufTransportInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufTransportInfo::new)); + addInfoIfNonNull(ProtobufHttpInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufHttpInfo::new)); + addInfoIfNonNull(ProtobufPluginsAndModules.class, protobufStreamInput.readOptionalWriteable(ProtobufPluginsAndModules::new)); + addInfoIfNonNull(ProtobufIngestInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufIngestInfo::new)); + addInfoIfNonNull(ProtobufAggregationInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufAggregationInfo::new)); if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_7_0)) { - addInfoIfNonNull( - ProtobufSearchPipelineInfo.class, - protobufStreamInput.readOptionalWriteable(ProtobufSearchPipelineInfo::new, in) - ); + addInfoIfNonNull(ProtobufSearchPipelineInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufSearchPipelineInfo::new)); } } @@ -190,7 +187,7 @@ private void addInfoIfNonNull( @Override public void writeTo(CodedOutputStream out) throws IOException { super.writeTo(out); - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); out.writeInt32NoTag(version.id); Build.writeBuildProtobuf(build, out); if (totalIndexingBuffer == null) { @@ -205,17 +202,17 @@ public void writeTo(CodedOutputStream out) throws IOException { out.writeBoolNoTag(true); Settings.writeSettingsToStreamProtobuf(settings, out); } - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufOsInfo.class), out); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufProcessInfo.class), out); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufJvmInfo.class), out); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufThreadPoolInfo.class), out); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufTransportInfo.class), out); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufHttpInfo.class), out); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufPluginsAndModules.class), out); - 
protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufIngestInfo.class), out); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufAggregationInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufOsInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufProcessInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufJvmInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufThreadPoolInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufTransportInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufHttpInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufPluginsAndModules.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufIngestInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufAggregationInfo.class)); if (protobufStreamOutput.getVersion().onOrAfter(Version.V_2_7_0)) { - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufSearchPipelineInfo.class), out); + protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufSearchPipelineInfo.class)); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java new file mode 100644 index 0000000000000..7cb1201f2ccef --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java @@ -0,0 +1,26 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.node.info; + +import org.opensearch.action.ProtobufActionType; + +/** + * Transport action for OpenSearch Node Information +* +* @opensearch.internal +*/ +public class ProtobufNodesInfoAction extends ProtobufActionType { + + public static final ProtobufNodesInfoAction INSTANCE = new ProtobufNodesInfoAction(); + public static final String NAME = "cluster:monitor/nodes/info"; + + private ProtobufNodesInfoAction() { + super(NAME, ProtobufNodesInfoResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java index 04de76bb281fe..2fb3c3319822b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java @@ -45,9 +45,9 @@ public class ProtobufNodesInfoRequest extends ProtobufBaseNodesRequest { + + public ProtobufNodesInfoRequestBuilder(ProtobufOpenSearchClient client, ProtobufNodesInfoAction action) { + super(client, action, new ProtobufNodesInfoRequest()); + } + + /** + * Clears all info flags. + */ + public ProtobufNodesInfoRequestBuilder clear() { + request.clear(); + return this; + } + + /** + * Sets to return all the data. + */ + public ProtobufNodesInfoRequestBuilder all() { + request.all(); + return this; + } + + /** + * Add a single metric to the request. + * + * @param metric Name of metric as a string. + * @return This, for request chaining. 
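+ *
+ * <p>For example (the metric name is illustrative): {@code builder.clear().addMetric("os")}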
+ */ + public ProtobufNodesInfoRequestBuilder addMetric(String metric) { + request.addMetric(metric); + return this; + } + + /** + * Add an array of metrics to the request. + * + * @param metrics Metric names as strings. + * @return This, for request chaining. + */ + public ProtobufNodesInfoRequestBuilder addMetrics(String... metrics) { + request.addMetrics(metrics); + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java index a7d5274fcf09a..2f82367e0c9e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java @@ -64,13 +64,13 @@ public ProtobufNodesInfoResponse( @Override protected List readNodesFrom(CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); - return protobufStreamInput.readList(ProtobufNodeInfo::new, in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + return protobufStreamInput.readList(ProtobufNodeInfo::new); } @Override protected void writeNodesTo(CodedOutputStream out, List nodes) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); - protobufStreamOutput.writeCollection(nodes, (o, v) -> v.writeTo(o), out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeCollection(nodes, (o, v) -> v.writeTo(o)); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java index 1df8383a78736..c1117c4321544 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java @@ -37,16 +37,16 @@ public ProtobufPluginsAndModules(List plugins, List v.writeTo(o), out); - protobufStreamOutput.writeCollection(modules, (o, v) -> v.writeTo(o), out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeCollection(plugins, (o, v) -> v.writeTo(o)); + protobufStreamOutput.writeCollection(modules, (o, v) -> v.writeTo(o)); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java new file mode 100644 index 0000000000000..8a96eb41bb1b7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java @@ -0,0 +1,322 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.stats; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.support.nodes.ProtobufBaseNodeResponse; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.discovery.ProtobufDiscoveryStats; +import org.opensearch.http.ProtobufHttpStats; +import org.opensearch.indices.ProtobufNodeIndicesStats; +import org.opensearch.indices.breaker.ProtobufAllCircuitBreakerStats; +import org.opensearch.ingest.ProtobufIngestStats; +import org.opensearch.monitor.fs.ProtobufFsInfo; +import org.opensearch.monitor.jvm.ProtobufJvmStats; +import org.opensearch.monitor.os.ProtobufOsStats; +import org.opensearch.monitor.process.ProtobufProcessStats; +import org.opensearch.node.ProtobufAdaptiveSelectionStats; +import org.opensearch.script.ProtobufScriptStats; +import org.opensearch.threadpool.ProtobufThreadPoolStats; +import org.opensearch.transport.ProtobufTransportStats; + +import java.io.IOException; +import java.util.Map; + +/** + * Node statistics (dynamic, changes depending on when created). +* +* @opensearch.internal +*/ +public class ProtobufNodeStats extends ProtobufBaseNodeResponse implements ToXContentFragment { + + private long timestamp; + + @Nullable + private ProtobufNodeIndicesStats indices; + + @Nullable + private ProtobufOsStats os; + + @Nullable + private ProtobufProcessStats process; + + @Nullable + private ProtobufJvmStats jvm; + + @Nullable + private ProtobufThreadPoolStats threadPool; + + @Nullable + private ProtobufFsInfo fs; + + @Nullable + private ProtobufTransportStats transport; + + @Nullable + private ProtobufHttpStats http; + + @Nullable + private ProtobufAllCircuitBreakerStats breaker; + + @Nullable + private ProtobufScriptStats scriptStats; + + @Nullable + private ProtobufDiscoveryStats discoveryStats; + + @Nullable + private ProtobufIngestStats ingestStats; + + @Nullable + private ProtobufAdaptiveSelectionStats adaptiveSelectionStats; + + public ProtobufNodeStats(CodedInputStream in) throws IOException { + super(in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + timestamp = in.readInt64(); + if (in.readBool()) { + indices = new ProtobufNodeIndicesStats(in); + } + os = protobufStreamInput.readOptionalWriteable(ProtobufOsStats::new); + process = protobufStreamInput.readOptionalWriteable(ProtobufProcessStats::new); + jvm = protobufStreamInput.readOptionalWriteable(ProtobufJvmStats::new); + threadPool = protobufStreamInput.readOptionalWriteable(ProtobufThreadPoolStats::new); + fs = protobufStreamInput.readOptionalWriteable(ProtobufFsInfo::new); + transport = protobufStreamInput.readOptionalWriteable(ProtobufTransportStats::new); + http = protobufStreamInput.readOptionalWriteable(ProtobufHttpStats::new); + breaker = protobufStreamInput.readOptionalWriteable(ProtobufAllCircuitBreakerStats::new); + scriptStats = protobufStreamInput.readOptionalWriteable(ProtobufScriptStats::new); + discoveryStats = protobufStreamInput.readOptionalWriteable(ProtobufDiscoveryStats::new); + ingestStats = protobufStreamInput.readOptionalWriteable(ProtobufIngestStats::new); + adaptiveSelectionStats = 
protobufStreamInput.readOptionalWriteable(ProtobufAdaptiveSelectionStats::new); + } + + public ProtobufNodeStats( + ProtobufDiscoveryNode node, + long timestamp, + @Nullable ProtobufNodeIndicesStats indices, + @Nullable ProtobufOsStats os, + @Nullable ProtobufProcessStats process, + @Nullable ProtobufJvmStats jvm, + @Nullable ProtobufThreadPoolStats threadPool, + @Nullable ProtobufFsInfo fs, + @Nullable ProtobufTransportStats transport, + @Nullable ProtobufHttpStats http, + @Nullable ProtobufAllCircuitBreakerStats breaker, + @Nullable ProtobufScriptStats scriptStats, + @Nullable ProtobufDiscoveryStats discoveryStats, + @Nullable ProtobufIngestStats ingestStats, + @Nullable ProtobufAdaptiveSelectionStats adaptiveSelectionStats + ) { + super(node); + this.timestamp = timestamp; + this.indices = indices; + this.os = os; + this.process = process; + this.jvm = jvm; + this.threadPool = threadPool; + this.fs = fs; + this.transport = transport; + this.http = http; + this.breaker = breaker; + this.scriptStats = scriptStats; + this.discoveryStats = discoveryStats; + this.ingestStats = ingestStats; + this.adaptiveSelectionStats = adaptiveSelectionStats; + } + + public long getTimestamp() { + return this.timestamp; + } + + @Nullable + public String getHostname() { + return getNode().getHostName(); + } + + /** + * Indices level stats. + */ + @Nullable + public ProtobufNodeIndicesStats getIndices() { + return this.indices; + } + + /** + * Operating System level statistics. + */ + @Nullable + public ProtobufOsStats getOs() { + return this.os; + } + + /** + * Process level statistics. + */ + @Nullable + public ProtobufProcessStats getProcess() { + return process; + } + + /** + * JVM level statistics. + */ + @Nullable + public ProtobufJvmStats getJvm() { + return jvm; + } + + /** + * Thread Pool level statistics. + */ + @Nullable + public ProtobufThreadPoolStats getThreadPool() { + return this.threadPool; + } + + /** + * File system level stats. 
+ */ + @Nullable + public ProtobufFsInfo getFs() { + return fs; + } + + @Nullable + public ProtobufTransportStats getTransport() { + return this.transport; + } + + @Nullable + public ProtobufHttpStats getHttp() { + return this.http; + } + + @Nullable + public ProtobufAllCircuitBreakerStats getBreaker() { + return this.breaker; + } + + @Nullable + public ProtobufScriptStats getScriptStats() { + return this.scriptStats; + } + + @Nullable + public ProtobufDiscoveryStats getDiscoveryStats() { + return this.discoveryStats; + } + + @Nullable + public ProtobufIngestStats getIngestStats() { + return ingestStats; + } + + @Nullable + public ProtobufAdaptiveSelectionStats getAdaptiveSelectionStats() { + return adaptiveSelectionStats; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + out.writeInt64NoTag(timestamp); + if (indices == null) { + out.writeBoolNoTag(false); + } else { + out.writeBoolNoTag(true); + indices.writeTo(out); + } + protobufStreamOutput.writeOptionalWriteable(os); + protobufStreamOutput.writeOptionalWriteable(process); + protobufStreamOutput.writeOptionalWriteable(jvm); + protobufStreamOutput.writeOptionalWriteable(threadPool); + protobufStreamOutput.writeOptionalWriteable(fs); + protobufStreamOutput.writeOptionalWriteable(transport); + protobufStreamOutput.writeOptionalWriteable(http); + protobufStreamOutput.writeOptionalWriteable(breaker); + protobufStreamOutput.writeOptionalWriteable(scriptStats); + protobufStreamOutput.writeOptionalWriteable(discoveryStats); + protobufStreamOutput.writeOptionalWriteable(ingestStats); + protobufStreamOutput.writeOptionalWriteable(adaptiveSelectionStats); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + + builder.field("name", getNode().getName()); + builder.field("transport_address", getNode().getAddress().toString()); + builder.field("host", getNode().getHostName()); + builder.field("ip", getNode().getAddress()); + + builder.startArray("roles"); + for (DiscoveryNodeRole role : getNode().getRoles()) { + builder.value(role.roleName()); + } + builder.endArray(); + + if (!getNode().getAttributes().isEmpty()) { + builder.startObject("attributes"); + for (Map.Entry attrEntry : getNode().getAttributes().entrySet()) { + builder.field(attrEntry.getKey(), attrEntry.getValue()); + } + builder.endObject(); + } + + if (getIndices() != null) { + getIndices().toXContent(builder, params); + } + if (getOs() != null) { + getOs().toXContent(builder, params); + } + if (getProcess() != null) { + getProcess().toXContent(builder, params); + } + if (getJvm() != null) { + getJvm().toXContent(builder, params); + } + if (getThreadPool() != null) { + getThreadPool().toXContent(builder, params); + } + if (getFs() != null) { + getFs().toXContent(builder, params); + } + if (getTransport() != null) { + getTransport().toXContent(builder, params); + } + if (getHttp() != null) { + getHttp().toXContent(builder, params); + } + if (getBreaker() != null) { + getBreaker().toXContent(builder, params); + } + if (getScriptStats() != null) { + getScriptStats().toXContent(builder, params); + } + if (getDiscoveryStats() != null) { + getDiscoveryStats().toXContent(builder, params); + } + if (getIngestStats() != null) { + getIngestStats().toXContent(builder, params); + } + if (getAdaptiveSelectionStats() != null) { + getAdaptiveSelectionStats().toXContent(builder, params); + } + + return 
builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java new file mode 100644 index 0000000000000..a03ee6119f0b2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java @@ -0,0 +1,26 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.node.stats; + +import org.opensearch.action.ProtobufActionType; + +/** + * Transport action for obtaining OpenSearch Node Stats +* +* @opensearch.internal +*/ +public class ProtobufNodesStatsAction extends ProtobufActionType { + + public static final ProtobufNodesStatsAction INSTANCE = new ProtobufNodesStatsAction(); + public static final String NAME = "cluster:monitor/nodes/stats"; + + private ProtobufNodesStatsAction() { + super(NAME, ProtobufNodesStatsResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java index d55b511eb0170..c61deaf87c413 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java @@ -10,12 +10,10 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; -import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.ProtobufCommonStatsFlags; -import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.ProtobufBaseNodesRequest; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; @@ -43,7 +41,8 @@ public ProtobufNodesStatsRequest(CodedInputStream in) throws IOException { indices = new ProtobufCommonStatsFlags(in); requestedMetrics.clear(); - requestedMetrics.addAll(in.readStringList()); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + requestedMetrics.addAll(protobufStreamInput.readList(CodedInputStream::readString)); } /** @@ -153,7 +152,8 @@ public ProtobufNodesStatsRequest removeMetric(String metric) { public void writeTo(CodedOutputStream out) throws IOException { super.writeTo(out); indices.writeTo(out); - out.writeStringArray(requestedMetrics.toArray(new String[0])); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeStringArray(requestedMetrics.toArray(new String[0])); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java new file mode 100644 index 0000000000000..3c6d14ad478d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java @@ -0,0 +1,82 @@ +/* +* 
SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.node.stats; + +import org.opensearch.action.admin.indices.stats.ProtobufCommonStatsFlags; +import org.opensearch.action.support.nodes.ProtobufNodesOperationRequestBuilder; +import org.opensearch.client.ProtobufOpenSearchClient; + +/** + * Transport builder for obtaining OpenSearch Node Stats +* +* @opensearch.internal +*/ +public class ProtobufNodesStatsRequestBuilder extends ProtobufNodesOperationRequestBuilder< + ProtobufNodesStatsRequest, + ProtobufNodesStatsResponse, + ProtobufNodesStatsRequestBuilder> { + + public ProtobufNodesStatsRequestBuilder(ProtobufOpenSearchClient client, ProtobufNodesStatsAction action) { + super(client, action, new ProtobufNodesStatsRequest()); + } + + /** + * Sets all the request flags. + */ + public ProtobufNodesStatsRequestBuilder all() { + request.all(); + return this; + } + + /** + * Clears all stats flags. + */ + public ProtobufNodesStatsRequestBuilder clear() { + request.clear(); + return this; + } + + /** + * Add a single metric to the request. + * + * @param metric Name of metric as a string. + * @return This, for request chaining. + */ + public ProtobufNodesStatsRequestBuilder addMetric(String metric) { + request.addMetric(metric); + return this; + } + + /** + * Add an array of metrics to the request. + * + * @param metrics Metric names as strings. + * @return This, for request chaining. + */ + public ProtobufNodesStatsRequestBuilder addMetrics(String... metrics) { + request.addMetrics(metrics); + return this; + } + + /** + * Should the node indices stats be returned. + */ + public ProtobufNodesStatsRequestBuilder setIndices(boolean indices) { + request.indices(indices); + return this; + } + + /** + * Should the node indices stats be returned. + */ + public ProtobufNodesStatsRequestBuilder setIndices(ProtobufCommonStatsFlags indices) { + request.indices(indices); + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java new file mode 100644 index 0000000000000..248608203dc79 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java @@ -0,0 +1,84 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.stats; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.ProtobufFailedNodeException; +import org.opensearch.action.support.nodes.ProtobufBaseNodesResponse; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.List; + +/** + * Transport response for obtaining OpenSearch Node Stats +* +* @opensearch.internal +*/ +public class ProtobufNodesStatsResponse extends ProtobufBaseNodesResponse implements ToXContentFragment { + + public ProtobufNodesStatsResponse(CodedInputStream in) throws IOException { + super(in); + } + + public ProtobufNodesStatsResponse( + ProtobufClusterName clusterName, + List nodes, + List failures + ) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + return protobufStreamInput.readList(ProtobufNodeStats::new); + } + + @Override + protected void writeNodesTo(CodedOutputStream out, List nodes) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeCollection(nodes, (o, v) -> v.writeTo(o)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("nodes"); + for (ProtobufNodeStats nodeStats : getNodes()) { + builder.startObject(nodeStats.getNode().getId()); + builder.field("timestamp", nodeStats.getTimestamp()); + nodeStats.toXContent(builder, params); + + builder.endObject(); + } + builder.endObject(); + + return builder; + } + + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return Strings.toString(builder); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java index 5ab9aa023a298..3f3c2a4d80d0e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.action.ActionType; import org.opensearch.action.ProtobufActionType; /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java index 6798c3b8c46e5..75c9ab839ba45 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java @@ -51,31 +51,31 @@ public ProtobufClusterStateRequest() {} public 
ProtobufClusterStateRequest(CodedInputStream in) throws IOException { super(in); - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); routingTable = in.readBool(); nodes = in.readBool(); metadata = in.readBool(); blocks = in.readBool(); customs = in.readBool(); - indices = protobufStreamInput.readStringArray(in); + indices = protobufStreamInput.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptionsProtobuf(in); - waitForTimeout = protobufStreamInput.readTimeValue(in); - waitForMetadataVersion = protobufStreamInput.readOptionalLong(in); + waitForTimeout = protobufStreamInput.readTimeValue(); + waitForMetadataVersion = protobufStreamInput.readOptionalLong(); } @Override public void writeTo(CodedOutputStream out) throws IOException { super.writeTo(out); - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); out.writeBoolNoTag(routingTable); out.writeBoolNoTag(nodes); out.writeBoolNoTag(metadata); out.writeBoolNoTag(blocks); out.writeBoolNoTag(customs); - protobufStreamOutput.writeStringArray(indices, out); + protobufStreamOutput.writeStringArray(indices); indicesOptions.writeIndicesOptionsProtobuf(out); - protobufStreamOutput.writeTimeValue(waitForTimeout, out); - protobufStreamOutput.writeOptionalLong(waitForMetadataVersion, out); + protobufStreamOutput.writeTimeValue(waitForTimeout); + protobufStreamOutput.writeOptionalLong(waitForMetadataVersion); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java index 6255d47b9bd66..93c45eccd89a4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequestBuilder.java @@ -9,9 +9,7 @@ package org.opensearch.action.admin.cluster.state; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.action.support.clustermanager.ProtobufClusterManagerNodeReadOperationRequestBuilder; -import org.opensearch.client.OpenSearchClient; import org.opensearch.client.ProtobufOpenSearchClient; import org.opensearch.common.unit.TimeValue; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java index a88e42d78c66a..60892290c94a5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java @@ -35,13 +35,11 @@ public class ProtobufClusterStateResponse extends ProtobufActionResponse { private ProtobufClusterState clusterState; private boolean waitForTimedOut = false; - private final ProtobufStreamInput protobufStreamInput; - public ProtobufClusterStateResponse(CodedInputStream in) throws IOException { super(in); - protobufStreamInput = new ProtobufStreamInput(); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); clusterName = new ProtobufClusterName(in); - clusterState = 
protobufStreamInput.readOptionalWriteable(innerIn -> ProtobufClusterState.readFrom(innerIn, null), in); + clusterState = protobufStreamInput.readOptionalWriteable(innerIn -> ProtobufClusterState.readFrom(innerIn, null)); waitForTimedOut = in.readBool(); } @@ -49,7 +47,6 @@ public ProtobufClusterStateResponse(ProtobufClusterName clusterName, ProtobufClu this.clusterName = clusterName; this.clusterState = clusterState; this.waitForTimedOut = waitForTimedOut; - protobufStreamInput = new ProtobufStreamInput(); } /** @@ -77,9 +74,9 @@ public boolean isWaitForTimedOut() { @Override public void writeTo(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); clusterName.writeTo(out); - protobufStreamOutput.writeOptionalWriteable(clusterState, out); + protobufStreamOutput.writeOptionalWriteable(clusterState); out.writeBoolNoTag(waitForTimedOut); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java new file mode 100644 index 0000000000000..89200608328dc --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java @@ -0,0 +1,460 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.indices.stats; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.cache.query.ProtobufQueryCacheStats; +import org.opensearch.index.cache.request.ProtobufRequestCacheStats; +import org.opensearch.index.engine.ProtobufSegmentsStats; +import org.opensearch.index.fielddata.ProtobufFieldDataStats; +import org.opensearch.index.flush.ProtobufFlushStats; +import org.opensearch.index.get.ProtobufGetStats; +import org.opensearch.index.merge.ProtobufMergeStats; +import org.opensearch.index.recovery.ProtobufRecoveryStats; +import org.opensearch.index.refresh.ProtobufRefreshStats; +import org.opensearch.index.search.stats.ProtobufSearchStats; +import org.opensearch.index.shard.ProtobufDocsStats; +import org.opensearch.index.shard.ProtobufIndexingStats; +import org.opensearch.index.store.ProtobufStoreStats; +import org.opensearch.index.translog.ProtobufTranslogStats; +import org.opensearch.index.warmer.ProtobufWarmerStats; +import org.opensearch.search.suggest.completion.ProtobufCompletionStats; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Stream; + +/** + * Common Stats for OpenSearch +* +* @opensearch.internal +*/ +public class ProtobufCommonStats implements ProtobufWriteable, ToXContentFragment { + + @Nullable + public ProtobufDocsStats docs; + + @Nullable + public ProtobufStoreStats store; + + @Nullable + public ProtobufIndexingStats indexing; + + @Nullable + 
public ProtobufGetStats get; + + @Nullable + public ProtobufSearchStats search; + + @Nullable + public ProtobufMergeStats merge; + + @Nullable + public ProtobufRefreshStats refresh; + + @Nullable + public ProtobufFlushStats flush; + + @Nullable + public ProtobufWarmerStats warmer; + + @Nullable + public ProtobufQueryCacheStats queryCache; + + @Nullable + public ProtobufFieldDataStats fieldData; + + @Nullable + public ProtobufCompletionStats completion; + + @Nullable + public ProtobufSegmentsStats segments; + + @Nullable + public ProtobufTranslogStats translog; + + @Nullable + public ProtobufRequestCacheStats requestCache; + + @Nullable + public ProtobufRecoveryStats recoveryStats; + + public ProtobufCommonStats() { + this(ProtobufCommonStatsFlags.NONE); + } + + public ProtobufCommonStats(ProtobufCommonStatsFlags flags) { + ProtobufCommonStatsFlags.Flag[] setFlags = flags.getFlags(); + + for (ProtobufCommonStatsFlags.Flag flag : setFlags) { + switch (flag) { + case Docs: + docs = new ProtobufDocsStats(); + break; + case Store: + store = new ProtobufStoreStats(); + break; + case Indexing: + indexing = new ProtobufIndexingStats(); + break; + case Get: + get = new ProtobufGetStats(); + break; + case Search: + search = new ProtobufSearchStats(); + break; + case Merge: + merge = new ProtobufMergeStats(); + break; + case Refresh: + refresh = new ProtobufRefreshStats(); + break; + case Flush: + flush = new ProtobufFlushStats(); + break; + case Warmer: + warmer = new ProtobufWarmerStats(); + break; + case QueryCache: + queryCache = new ProtobufQueryCacheStats(); + break; + case FieldData: + fieldData = new ProtobufFieldDataStats(); + break; + case Completion: + completion = new ProtobufCompletionStats(); + break; + case Segments: + segments = new ProtobufSegmentsStats(); + break; + case Translog: + translog = new ProtobufTranslogStats(); + break; + case RequestCache: + requestCache = new ProtobufRequestCacheStats(); + break; + case Recovery: + recoveryStats = new ProtobufRecoveryStats(); + break; + default: + throw new IllegalStateException("Unknown Flag: " + flag); + } + } + } + + public ProtobufCommonStats(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + docs = protobufStreamInput.readOptionalWriteable(ProtobufDocsStats::new); + store = protobufStreamInput.readOptionalWriteable(ProtobufStoreStats::new); + indexing = protobufStreamInput.readOptionalWriteable(ProtobufIndexingStats::new); + get = protobufStreamInput.readOptionalWriteable(ProtobufGetStats::new); + search = protobufStreamInput.readOptionalWriteable(ProtobufSearchStats::new); + merge = protobufStreamInput.readOptionalWriteable(ProtobufMergeStats::new); + refresh = protobufStreamInput.readOptionalWriteable(ProtobufRefreshStats::new); + flush = protobufStreamInput.readOptionalWriteable(ProtobufFlushStats::new); + warmer = protobufStreamInput.readOptionalWriteable(ProtobufWarmerStats::new); + queryCache = protobufStreamInput.readOptionalWriteable(ProtobufQueryCacheStats::new); + fieldData = protobufStreamInput.readOptionalWriteable(ProtobufFieldDataStats::new); + completion = protobufStreamInput.readOptionalWriteable(ProtobufCompletionStats::new); + segments = protobufStreamInput.readOptionalWriteable(ProtobufSegmentsStats::new); + translog = protobufStreamInput.readOptionalWriteable(ProtobufTranslogStats::new); + requestCache = protobufStreamInput.readOptionalWriteable(ProtobufRequestCacheStats::new); + recoveryStats = 
protobufStreamInput.readOptionalWriteable(ProtobufRecoveryStats::new); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeOptionalWriteable(docs); + protobufStreamOutput.writeOptionalWriteable(store); + protobufStreamOutput.writeOptionalWriteable(indexing); + protobufStreamOutput.writeOptionalWriteable(get); + protobufStreamOutput.writeOptionalWriteable(search); + protobufStreamOutput.writeOptionalWriteable(merge); + protobufStreamOutput.writeOptionalWriteable(refresh); + protobufStreamOutput.writeOptionalWriteable(flush); + protobufStreamOutput.writeOptionalWriteable(warmer); + protobufStreamOutput.writeOptionalWriteable(queryCache); + protobufStreamOutput.writeOptionalWriteable(fieldData); + protobufStreamOutput.writeOptionalWriteable(completion); + protobufStreamOutput.writeOptionalWriteable(segments); + protobufStreamOutput.writeOptionalWriteable(translog); + protobufStreamOutput.writeOptionalWriteable(requestCache); + protobufStreamOutput.writeOptionalWriteable(recoveryStats); + } + + public void add(ProtobufCommonStats stats) { + if (docs == null) { + if (stats.getDocs() != null) { + docs = new ProtobufDocsStats(); + docs.add(stats.getDocs()); + } + } else { + docs.add(stats.getDocs()); + } + if (store == null) { + if (stats.getStore() != null) { + store = new ProtobufStoreStats(); + store.add(stats.getStore()); + } + } else { + store.add(stats.getStore()); + } + if (indexing == null) { + if (stats.getIndexing() != null) { + indexing = new ProtobufIndexingStats(); + indexing.add(stats.getIndexing()); + } + } else { + indexing.add(stats.getIndexing()); + } + if (get == null) { + if (stats.getGet() != null) { + get = new ProtobufGetStats(); + get.add(stats.getGet()); + } + } else { + get.add(stats.getGet()); + } + if (search == null) { + if (stats.getSearch() != null) { + search = new ProtobufSearchStats(); + search.add(stats.getSearch()); + } + } else { + search.add(stats.getSearch()); + } + if (merge == null) { + if (stats.getMerge() != null) { + merge = new ProtobufMergeStats(); + merge.add(stats.getMerge()); + } + } else { + merge.add(stats.getMerge()); + } + if (refresh == null) { + if (stats.getRefresh() != null) { + refresh = new ProtobufRefreshStats(); + refresh.add(stats.getRefresh()); + } + } else { + refresh.add(stats.getRefresh()); + } + if (flush == null) { + if (stats.getFlush() != null) { + flush = new ProtobufFlushStats(); + flush.add(stats.getFlush()); + } + } else { + flush.add(stats.getFlush()); + } + if (warmer == null) { + if (stats.getWarmer() != null) { + warmer = new ProtobufWarmerStats(); + warmer.add(stats.getWarmer()); + } + } else { + warmer.add(stats.getWarmer()); + } + if (queryCache == null) { + if (stats.getQueryCache() != null) { + queryCache = new ProtobufQueryCacheStats(); + queryCache.add(stats.getQueryCache()); + } + } else { + queryCache.add(stats.getQueryCache()); + } + + if (fieldData == null) { + if (stats.getFieldData() != null) { + fieldData = new ProtobufFieldDataStats(); + fieldData.add(stats.getFieldData()); + } + } else { + fieldData.add(stats.getFieldData()); + } + if (completion == null) { + if (stats.getCompletion() != null) { + completion = new ProtobufCompletionStats(); + completion.add(stats.getCompletion()); + } + } else { + completion.add(stats.getCompletion()); + } + if (segments == null) { + if (stats.getSegments() != null) { + segments = new ProtobufSegmentsStats(); + 
segments.add(stats.getSegments()); + } + } else { + segments.add(stats.getSegments()); + } + if (translog == null) { + if (stats.getTranslog() != null) { + translog = new ProtobufTranslogStats(); + translog.add(stats.getTranslog()); + } + } else { + translog.add(stats.getTranslog()); + } + if (requestCache == null) { + if (stats.getRequestCache() != null) { + requestCache = new ProtobufRequestCacheStats(); + requestCache.add(stats.getRequestCache()); + } + } else { + requestCache.add(stats.getRequestCache()); + } + if (recoveryStats == null) { + if (stats.getRecoveryStats() != null) { + recoveryStats = new ProtobufRecoveryStats(); + recoveryStats.add(stats.getRecoveryStats()); + } + } else { + recoveryStats.add(stats.getRecoveryStats()); + } + } + + @Nullable + public ProtobufDocsStats getDocs() { + return this.docs; + } + + @Nullable + public ProtobufStoreStats getStore() { + return store; + } + + @Nullable + public ProtobufIndexingStats getIndexing() { + return indexing; + } + + @Nullable + public ProtobufGetStats getGet() { + return get; + } + + @Nullable + public ProtobufSearchStats getSearch() { + return search; + } + + @Nullable + public ProtobufMergeStats getMerge() { + return merge; + } + + @Nullable + public ProtobufRefreshStats getRefresh() { + return refresh; + } + + @Nullable + public ProtobufFlushStats getFlush() { + return flush; + } + + @Nullable + public ProtobufWarmerStats getWarmer() { + return this.warmer; + } + + @Nullable + public ProtobufQueryCacheStats getQueryCache() { + return this.queryCache; + } + + @Nullable + public ProtobufFieldDataStats getFieldData() { + return this.fieldData; + } + + @Nullable + public ProtobufCompletionStats getCompletion() { + return completion; + } + + @Nullable + public ProtobufSegmentsStats getSegments() { + return segments; + } + + @Nullable + public ProtobufTranslogStats getTranslog() { + return translog; + } + + @Nullable + public ProtobufRequestCacheStats getRequestCache() { + return requestCache; + } + + @Nullable + public ProtobufRecoveryStats getRecoveryStats() { + return recoveryStats; + } + + /** + * Utility method which computes total memory by adding + * FieldData, PercolatorCache, Segments (index writer, version map) + */ + public ByteSizeValue getTotalMemory() { + long size = 0; + if (this.getFieldData() != null) { + size += this.getFieldData().getMemorySizeInBytes(); + } + if (this.getQueryCache() != null) { + size += this.getQueryCache().getMemorySizeInBytes(); + } + if (this.getSegments() != null) { + size += this.getSegments().getIndexWriterMemoryInBytes() + this.getSegments().getVersionMapMemoryInBytes(); + } + + return new ByteSizeValue(size); + } + + // note, requires a wrapping object + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + final Stream<ToXContent> stream = Arrays.stream( + new ToXContent[] { + docs, + store, + indexing, + get, + search, + merge, + refresh, + flush, + warmer, + queryCache, + fieldData, + completion, + segments, + translog, + requestCache, + recoveryStats } + ).filter(Objects::nonNull); + for (ToXContent toXContent : ((Iterable<ToXContent>) stream::iterator)) { + toXContent.toXContent(builder, params); + } + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java index 82df111cceda4..d4a306d7a79ea 100644 ---
a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStatsFlags.java @@ -15,9 +15,6 @@ import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.io.stream.Writeable; import java.io.IOException; import java.util.Collections; @@ -30,8 +27,8 @@ */ public class ProtobufCommonStatsFlags implements ProtobufWriteable, Cloneable { - public static final CommonStatsFlags ALL = new CommonStatsFlags().all(); - public static final CommonStatsFlags NONE = new CommonStatsFlags().clear(); + public static final ProtobufCommonStatsFlags ALL = new ProtobufCommonStatsFlags().all(); + public static final ProtobufCommonStatsFlags NONE = new ProtobufCommonStatsFlags().clear(); private EnumSet<Flag> flags = EnumSet.allOf(Flag.class); private String[] groups = null; @@ -53,8 +50,8 @@ public ProtobufCommonStatsFlags(Flag... flags) { } public ProtobufCommonStatsFlags(CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); - final long longFlags = in.readLong(); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + final long longFlags = in.readInt64(); flags.clear(); for (Flag flag : Flag.values()) { if ((longFlags & (1 << flag.getIndex())) != 0) { @@ -62,11 +59,11 @@ public ProtobufCommonStatsFlags(CodedInputStream in) throws IOException { } } if (protobufStreamInput.getVersion().before(Version.V_2_0_0)) { - protobufStreamInput.readStringArray(in); + protobufStreamInput.readStringArray(); } - groups = protobufStreamInput.readStringArray(in); - fieldDataFields = protobufStreamInput.readStringArray(in); - completionDataFields = protobufStreamInput.readStringArray(in); + groups = protobufStreamInput.readStringArray(); + fieldDataFields = protobufStreamInput.readStringArray(); + completionDataFields = protobufStreamInput.readStringArray(); includeSegmentFileSizes = in.readBool(); includeUnloadedSegments = in.readBool(); includeAllShardIndexingPressureTrackers = in.readBool(); @@ -75,7 +72,7 @@ @Override public void writeTo(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); long longFlags = 0; for (Flag flag : flags) { longFlags |= (1 << flag.getIndex()); @@ -83,11 +80,11 @@ public void writeTo(CodedOutputStream out) throws IOException { out.writeInt64NoTag(longFlags); if (protobufStreamOutput.getVersion().before(Version.V_2_0_0)) { - protobufStreamOutput.writeStringArrayNullable(Strings.EMPTY_ARRAY, out); + protobufStreamOutput.writeStringArrayNullable(Strings.EMPTY_ARRAY); } - protobufStreamOutput.writeStringArrayNullable(groups, out); - protobufStreamOutput.writeStringArrayNullable(fieldDataFields, out); - protobufStreamOutput.writeStringArrayNullable(completionDataFields, out); + protobufStreamOutput.writeStringArrayNullable(groups); + protobufStreamOutput.writeStringArrayNullable(fieldDataFields); + protobufStreamOutput.writeStringArrayNullable(completionDataFields); out.writeBoolNoTag(includeSegmentFileSizes); out.writeBoolNoTag(includeUnloadedSegments);
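// note: these trailing booleans are serialized with writeBoolNoTag, i.e. as bare protobuf values with no field tags, so readers must consume them in exactly this order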
out.writeBoolNoTag(includeAllShardIndexingPressureTrackers); diff --git a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java index 32da5e6002f55..82a7fc82e5f5f 100644 --- a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java @@ -283,10 +283,10 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { } public void writeIndicesOptionsProtobuf(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); EnumSet

+ * Supported Platforms: All. + */ + public short getPercent() { + return percent; + } + + /** + * Get the Process cpu time (sum of User and Sys). + * <p>
+ * Supported Platforms: All. + */ + public TimeValue getTotal() { + return new TimeValue(total); + } + } +} diff --git a/server/src/main/java/org/opensearch/node/ProtobufAdaptiveSelectionStats.java b/server/src/main/java/org/opensearch/node/ProtobufAdaptiveSelectionStats.java new file mode 100644 index 0000000000000..da47e0226b69a --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ProtobufAdaptiveSelectionStats.java @@ -0,0 +1,121 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.node; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * Class representing statistics about adaptive replica selection. This includes +* EWMA of queue size, service time, and response time, as well as outgoing +* searches to each node and the "rank" based on the ARS formula. +* +* @opensearch.internal +*/ +public class ProtobufAdaptiveSelectionStats implements ProtobufWriteable, ToXContentFragment { + + private final Map<String, Long> clientOutgoingConnections; + private final Map<String, ResponseCollectorService.ProtobufComputedNodeStats> nodeComputedStats; + + public ProtobufAdaptiveSelectionStats( + Map<String, Long> clientConnections, + Map<String, ResponseCollectorService.ProtobufComputedNodeStats> nodeComputedStats + ) { + this.clientOutgoingConnections = clientConnections; + this.nodeComputedStats = nodeComputedStats; + } + + public ProtobufAdaptiveSelectionStats(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + this.clientOutgoingConnections = protobufStreamInput.readMap(CodedInputStream::readString, CodedInputStream::readInt64); + this.nodeComputedStats = protobufStreamInput.readMap( + CodedInputStream::readString, + ResponseCollectorService.ProtobufComputedNodeStats::new + ); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeMap( + this.clientOutgoingConnections, + CodedOutputStream::writeStringNoTag, + CodedOutputStream::writeInt64NoTag + ); + protobufStreamOutput.writeMap( + this.nodeComputedStats, + CodedOutputStream::writeStringNoTag, + (stream, stats) -> stats.writeTo(stream) + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("adaptive_selection"); + Set<String> allNodeIds = Sets.union(clientOutgoingConnections.keySet(), nodeComputedStats.keySet()); + for (String nodeId : allNodeIds) { + builder.startObject(nodeId); + ResponseCollectorService.ProtobufComputedNodeStats stats = nodeComputedStats.get(nodeId); + if (stats != null) { + long outgoingSearches = clientOutgoingConnections.getOrDefault(nodeId, 0L); + builder.field("outgoing_searches", outgoingSearches); + builder.field("avg_queue_size", stats.queueSize); + if (builder.humanReadable()) { +
builder.field("avg_service_time", new TimeValue((long) stats.serviceTime, TimeUnit.NANOSECONDS).toString()); + } + builder.field("avg_service_time_ns", (long) stats.serviceTime); + if (builder.humanReadable()) { + builder.field("avg_response_time", new TimeValue((long) stats.responseTime, TimeUnit.NANOSECONDS).toString()); + } + builder.field("avg_response_time_ns", (long) stats.responseTime); + builder.field("rank", String.format(Locale.ROOT, "%.1f", stats.rank(outgoingSearches))); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + /** + * Returns a map of node id to the outgoing search requests to that node + */ + public Map getOutgoingConnections() { + return clientOutgoingConnections; + } + + /** + * Returns a map of node id to the computed stats + */ + public Map getComputedStats() { + return nodeComputedStats; + } + + /** + * Returns a map of node id to the ranking of the nodes based on the adaptive replica formula + */ + public Map getRanks() { + return nodeComputedStats.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().rank(clientOutgoingConnections.getOrDefault(e.getKey(), 0L)))); + } +} diff --git a/server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java b/server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java new file mode 100644 index 0000000000000..cf5ab03c8bb91 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java @@ -0,0 +1,31 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.node; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.ProtobufOpenSearchException; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; + +import java.io.IOException; + +/** + * An exception indicating that node is closed. +* +* @opensearch.internal +*/ +public class ProtobufNodeClosedException extends ProtobufOpenSearchException { + + public ProtobufNodeClosedException(ProtobufDiscoveryNode node) { + super("node closed " + node); + } + + public ProtobufNodeClosedException(CodedInputStream in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/node/ResponseCollectorService.java b/server/src/main/java/org/opensearch/node/ResponseCollectorService.java index 3b73ec8f9622d..88b29764e4f07 100644 --- a/server/src/main/java/org/opensearch/node/ResponseCollectorService.java +++ b/server/src/main/java/org/opensearch/node/ResponseCollectorService.java @@ -6,37 +6,16 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.node; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.ExponentiallyWeightedMovingAverage; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -219,6 +198,109 @@ public String toString() { } } + /** + * Struct-like class encapsulating a point-in-time snapshot of a particular + * node's statistics. This includes the EWMA of queue size, response time, + * and service time. + */ + public static class ProtobufComputedNodeStats implements ProtobufWriteable { + // We store timestamps with nanosecond precision, however, the + // formula specifies milliseconds, therefore we need to convert + // the values so the times don't unduly weight the formula + private final double FACTOR = 1000000.0; + private final int clientNum; + + private double cachedRank = 0; + + public final String nodeId; + public final int queueSize; + public final double responseTime; + public final double serviceTime; + + public ProtobufComputedNodeStats(String nodeId, int clientNum, int queueSize, double responseTime, double serviceTime) { + this.nodeId = nodeId; + this.clientNum = clientNum; + this.queueSize = queueSize; + this.responseTime = responseTime; + this.serviceTime = serviceTime; + } + + ProtobufComputedNodeStats(int clientNum, NodeStatistics nodeStats) { + this( + nodeStats.nodeId, + clientNum, + (int) nodeStats.queueSize.getAverage(), + nodeStats.responseTime.getAverage(), + nodeStats.serviceTime + ); + } + + ProtobufComputedNodeStats(CodedInputStream in) throws IOException { + this.nodeId = in.readString(); + this.clientNum = in.readInt32(); + this.queueSize = in.readInt32(); + this.responseTime = in.readDouble(); + this.serviceTime = in.readDouble(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeStringNoTag(this.nodeId); + out.writeInt32NoTag(this.clientNum); + out.writeInt32NoTag(this.queueSize); + out.writeDoubleNoTag(this.responseTime); + out.writeDoubleNoTag(this.serviceTime); + } + + /** + * Rank this copy of the data, according to the adaptive replica selection formula from the C3 paper + * https://www.usenix.org/system/files/conference/nsdi15/nsdi15-paper-suresh.pdf + */ + private double innerRank(long outstandingRequests) { + // the concurrency compensation is defined as the number of + // outstanding requests from the client to the node times the number + // of clients in the system + double concurrencyCompensation = outstandingRequests * clientNum; + + // Cubic queue adjustment factor. The paper chose 3 though we could + // potentially make this configurable if desired.
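+ // Putting it together, the value returned below is the C3 rank: + // qHatS = 1 + concurrencyCompensation + queueSize + // rank = rS - 1/muBarS + qHatS^3 / muBarS + // where rS and muBarS are the EWMA response and service times, + // converted from nanoseconds to milliseconds via FACTOR.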
+ int queueAdjustmentFactor = 3; + + // EWMA of queue size + double qBar = queueSize; + double qHatS = 1 + concurrencyCompensation + qBar; + + // EWMA of response time + double rS = responseTime / FACTOR; + // EWMA of service time + double muBarS = serviceTime / FACTOR; + + // The final formula + double rank = rS - (1.0 / muBarS) + (Math.pow(qHatS, queueAdjustmentFactor) / muBarS); + return rank; + } + + public double rank(long outstandingRequests) { + if (cachedRank == 0) { + cachedRank = innerRank(outstandingRequests); + } + return cachedRank; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ComputedNodeStats["); + sb.append(nodeId).append("]("); + sb.append("nodes: ").append(clientNum); + sb.append(", queue: ").append(queueSize); + sb.append(", response time: ").append(String.format(Locale.ROOT, "%.1f", responseTime)); + sb.append(", service time: ").append(String.format(Locale.ROOT, "%.1f", serviceTime)); + sb.append(", rank: ").append(String.format(Locale.ROOT, "%.1f", rank(1))); + sb.append(")"); + return sb.toString(); + } + } + /** * Class encapsulating a node's exponentially weighted queue size, response * time, and service time, however, this class is private and intended only diff --git a/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java b/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java index fcc06a0d11ba3..29620fb5ee5dd 100644 --- a/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java @@ -127,7 +127,7 @@ public ProtobufPluginInfo( * @throws IOException if an I/O exception occurred reading the plugin info from the stream */ public ProtobufPluginInfo(final CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); this.name = in.readString(); this.description = in.readString(); this.version = in.readString(); @@ -135,13 +135,13 @@ public ProtobufPluginInfo(final CodedInputStream in) throws IOException { this.javaVersion = in.readString(); this.classname = in.readString(); this.customFolderName = in.readString(); - this.extendedPlugins = protobufStreamInput.readList(CodedInputStream::readString, in); + this.extendedPlugins = protobufStreamInput.readList(CodedInputStream::readString); this.hasNativeController = in.readBool(); } @Override public void writeTo(final CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); out.writeStringNoTag(name); out.writeStringNoTag(description); out.writeStringNoTag(version); @@ -153,7 +153,7 @@ public void writeTo(final CodedOutputStream out) throws IOException { } else { out.writeStringNoTag(name); } - protobufStreamOutput.writeCollection(extendedPlugins, CodedOutputStream::writeStringNoTag, out); + protobufStreamOutput.writeCollection(extendedPlugins, CodedOutputStream::writeStringNoTag); out.writeBoolNoTag(hasNativeController); } diff --git a/server/src/main/java/org/opensearch/rest/ClientAgnosticRestHandler.java b/server/src/main/java/org/opensearch/rest/ClientAgnosticRestHandler.java new file mode 100644 index 0000000000000..f37b1f3cee272 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/ClientAgnosticRestHandler.java @@ -0,0 +1,27 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch 
Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.rest; + +/** + * Handler for REST requests, generic over the type of client used to execute them. +* +* @opensearch.api +*/ +@FunctionalInterface +public interface ClientAgnosticRestHandler<T> { + + /** + * Handles a rest request. + * @param request The request to handle + * @param channel The channel to write the request response to + * @param client A client to use to make internal requests on behalf of the original request + */ + void handleRequest(RestRequest request, RestChannel channel, T client) throws Exception; + +} diff --git a/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java b/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java new file mode 100644 index 0000000000000..42dd853e87aba --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java @@ -0,0 +1,309 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.rest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.spell.LevenshteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.support.clustermanager.ProtobufClusterManagerNodeRequest; +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.rest.action.admin.cluster.RestNodesUsageAction; +import org.opensearch.tasks.ProtobufTask; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.LongAdder; +import java.util.stream.Collectors; + +/** + * Base handler for REST requests. +* <p>
+* This handler makes sure that the headers & context of the handled {@link RestRequest requests} are copied over to +* the transport requests executed by the associated client. While the context is fully copied over, not all the headers +* are copied, but a selected few. It is possible to control what headers are copied over by returning them in +* {@link ActionPlugin#getRestHeaders()}. +* +* @opensearch.api +*/ +public abstract class ProtobufBaseRestHandler implements ProtobufRestHandler { + + public static final Setting<Boolean> MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting( + "rest.action.multi.allow_explicit_index", + true, + Property.NodeScope + ); + + private final LongAdder usageCount = new LongAdder(); + /** + * @deprecated declare your own logger. + */ + @Deprecated + protected Logger logger = LogManager.getLogger(getClass()); + + public final long getUsageCount() { + return usageCount.sum(); + } + + /** + * @return the name of this handler. The name should be human readable and + * should describe the action that will be performed when this API is + * called. This name is used in the response to the + * {@link RestNodesUsageAction}. + */ + public abstract String getName(); + + @Override + public final void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception { + // prepare the request for execution; has the side effect of touching the request parameters + final RestChannelConsumer action = prepareRequest(request, client); + + // validate unconsumed params, but we must exclude params used to format the response + // use a sorted set so the unconsumed parameters appear in a reliable sorted order + final SortedSet<String> unconsumedParams = request.unconsumedParams() + .stream() + .filter(p -> !responseParams().contains(p)) + .collect(Collectors.toCollection(TreeSet::new)); + + // validate the non-response params + if (!unconsumedParams.isEmpty()) { + final Set<String> candidateParams = new HashSet<>(); + candidateParams.addAll(request.consumedParams()); + candidateParams.addAll(responseParams()); + throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter")); + } + + if (request.hasContent() && request.isContentConsumed() == false) { + throw new IllegalArgumentException("request [" + request.method() + " " + request.path() + "] does not support having a body"); + } + + usageCount.increment(); + // execute the action + action.accept(channel); + } + + public static String unrecognizedStrings( + final RestRequest request, + final Set<String> invalids, + final Set<String> candidates, + final String detail + ) { + StringBuilder message = new StringBuilder( + String.format(Locale.ROOT, "request [%s] contains unrecognized %s%s: ", request.path(), detail, invalids.size() > 1 ?
"s" : "") + ); + boolean first = true; + for (final String invalid : invalids) { + final LevenshteinDistance ld = new LevenshteinDistance(); + final List> scoredParams = new ArrayList<>(); + for (final String candidate : candidates) { + final float distance = ld.getDistance(invalid, candidate); + if (distance > 0.5f) { + scoredParams.add(new Tuple<>(distance, candidate)); + } + } + CollectionUtil.timSort(scoredParams, (a, b) -> { + // sort by distance in reverse order, then parameter name for equal distances + int compare = a.v1().compareTo(b.v1()); + if (compare != 0) return -compare; + else return a.v2().compareTo(b.v2()); + }); + if (first == false) { + message.append(", "); + } + message.append("[").append(invalid).append("]"); + final List keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList()); + if (keys.isEmpty() == false) { + message.append(" -> did you mean "); + if (keys.size() == 1) { + message.append("[").append(keys.get(0)).append("]"); + } else { + message.append("any of ").append(keys.toString()); + } + message.append("?"); + } + first = false; + } + + return message.toString(); + } + + /** + * Returns a String message of the detail of any unrecognized error occurred. The string is intended for use in error messages to be returned to the user. + * + * @param request The request that caused the exception + * @param invalids Strings from the request which were unable to be understood. + * @param candidates A set of words that are most likely to be the valid strings determined invalid, to be suggested to the user. + * @param detail The parameter contains the details of the exception. + * @return a String that contains the message. + */ + protected final String unrecognized( + final RestRequest request, + final Set invalids, + final Set candidates, + final String detail + ) { + return unrecognizedStrings(request, invalids, candidates, detail); + } + + /** + * REST requests are handled by preparing a channel consumer that represents the execution of + * the request against a channel. + */ + @FunctionalInterface + protected interface RestChannelConsumer extends CheckedConsumer {} + + /** + * Prepare the request for execution. Implementations should consume all request params before + * returning the runnable for actual execution. Unconsumed params will immediately terminate + * execution of the request. However, some params are only used in processing the response; + * implementations can override {@link ProtobufBaseRestHandler#responseParams()} to indicate such + * params. + * + * @param request the request to execute + * @param client client for executing actions on the local node + * @return the action to execute + * @throws IOException if an I/O exception occurred parsing the request and preparing for + * execution + */ + protected abstract RestChannelConsumer prepareRequest(RestRequest request, ProtobufNodeClient client) throws IOException; + + /** + * Parameters used for controlling the response and thus might not be consumed during + * preparation of the request execution in + * {@link ProtobufBaseRestHandler#prepareRequest(RestRequest, ProtobufNodeClient)}. + * + * @return a set of parameters used to control the response and thus should not trip strict + * URL parameter checks. + */ + protected Set responseParams() { + return Collections.emptySet(); + } + + /** + * Parse the deprecated request parameter 'master_timeout', and add deprecated log if the parameter is used. 
+ * It also validates whether the two parameters 'master_timeout' and 'cluster_manager_timeout' are not assigned together. + * The method is temporarily added in 2.0 during the adoption of inclusive language. Remove the method along with MASTER_ROLE. + * @param mnr the action request + * @param request the REST request to handle + * @param logger the logger that logs deprecation notices + * @param logMsgKeyPrefix the key prefix of a deprecation message to avoid duplicate messages. + */ + public static void parseDeprecatedMasterTimeoutParameter( + ProtobufClusterManagerNodeRequest mnr, + RestRequest request, + DeprecationLogger logger, + String logMsgKeyPrefix + ) { + final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = + "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead."; + final String DUPLICATE_PARAMETER_ERROR_MESSAGE = + "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; + if (request.hasParam("master_timeout")) { + logger.deprecate(logMsgKeyPrefix + "_master_timeout_parameter", MASTER_TIMEOUT_DEPRECATED_MESSAGE); + if (request.hasParam("cluster_manager_timeout")) { + throw new OpenSearchParseException(DUPLICATE_PARAMETER_ERROR_MESSAGE); + } + mnr.clusterManagerNodeTimeout(request.paramAsTime("master_timeout", mnr.clusterManagerNodeTimeout())); + } + } + + /** + * A wrapper for the base handler. + * + * @opensearch.internal + */ + public static class Wrapper extends ProtobufBaseRestHandler { + + protected final ProtobufBaseRestHandler delegate; + + public Wrapper(ProtobufBaseRestHandler delegate) { + this.delegate = Objects.requireNonNull(delegate, "ProtobufBaseRestHandler delegate can not be null"); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public List<Route> routes() { + return delegate.routes(); + } + + @Override + public List<DeprecatedRoute> deprecatedRoutes() { + return delegate.deprecatedRoutes(); + } + + @Override + public List<ReplacedRoute> replacedRoutes() { + return delegate.replacedRoutes(); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, ProtobufNodeClient client) throws IOException { + return delegate.prepareRequest(request, client); + } + + @Override + protected Set<String> responseParams() { + return delegate.responseParams(); + } + + @Override + public boolean canTripCircuitBreaker() { + return delegate.canTripCircuitBreaker(); + } + + @Override + public boolean supportsContentStream() { + return delegate.supportsContentStream(); + } + + @Override + public boolean allowsUnsafeBuffers() { + return delegate.allowsUnsafeBuffers(); + } + + @Override + public boolean allowSystemIndexAccessByDefault() { + return delegate.allowSystemIndexAccessByDefault(); + } + } + + /** + * Return a task immediately when executing some long-running operations asynchronously, like reindex, resize, open, force merge + */ + public RestChannelConsumer sendTask(String nodeId, ProtobufTask task) { + return channel -> { + try (XContentBuilder builder = channel.newBuilder()) { + builder.startObject(); + builder.field("task", nodeId + ":" + task.getId()); + builder.endObject(); + channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); + } + }; + } +} diff --git a/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java b/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java new file mode 100644 index 0000000000000..7224847832d5b --- /dev/null +++
b/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java @@ -0,0 +1,259 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.rest; + +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.core.xcontent.XContent; +import org.opensearch.rest.RestRequest.Method; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Handler for REST requests +* +* @opensearch.api +*/ +@FunctionalInterface +public interface ProtobufRestHandler extends ClientAgnosticRestHandler<ProtobufNodeClient> { + + /** + * Handles a rest request. + * @param request The request to handle + * @param channel The channel to write the request response to + * @param client A client to use to make internal requests on behalf of the original request + */ + void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception; + + default boolean canTripCircuitBreaker() { + return true; + } + + /** + * Indicates if the ProtobufRestHandler supports content as a stream. A stream would be multiple objects delineated by + * {@link XContent#streamSeparator()}. If a handler returns true this will affect the types of content that can be sent to + * this endpoint. + */ + default boolean supportsContentStream() { + return false; + } + + /** + * Indicates if the ProtobufRestHandler supports working with pooled buffers. If the request handler will not escape the return + * {@link RestRequest#content()} or any buffers extracted from it then there is no need to make copies of any pooled buffers in the + * {@link RestRequest} instance before passing a request to this handler. If this instance does not support pooled/unsafe buffers + * {@link RestRequest#ensureSafeBuffers()} should be called on any request before passing it to {@link #handleRequest}. + * + * @return true iff the handler supports requests that make use of pooled buffers + */ + default boolean allowsUnsafeBuffers() { + return false; + } + + /** + * The list of {@link Route}s that this ProtobufRestHandler is responsible for handling. + */ + default List<Route> routes() { + return Collections.emptyList(); + } + + /** + * A list of routes handled by this ProtobufRestHandler that are deprecated and do not have a direct + * replacement. If changing the {@code path} or {@code method} of a route, + * use {@link #replacedRoutes()}. + */ + default List<DeprecatedRoute> deprecatedRoutes() { + return Collections.emptyList(); + } + + /** + * A list of routes handled by this ProtobufRestHandler that have had their {@code path} and/or + * {@code method} changed. The pre-existing {@code route} will be registered + * as deprecated alongside the updated {@code route}. + */ + default List<ReplacedRoute> replacedRoutes() { + return Collections.emptyList(); + } + + /** + * Controls whether requests handled by this class are allowed to access system indices by default. + * @return {@code true} if requests handled by this class should be allowed to access system indices. + */ + default boolean allowSystemIndexAccessByDefault() { + return false; + } + + static ProtobufRestHandler wrapper(ProtobufRestHandler delegate) { + return new Wrapper(delegate); + } + + /** + * Wrapper for a handler.
+ * + * @opensearch.internal + */ + class Wrapper implements ProtobufRestHandler { + private final ProtobufRestHandler delegate; + + public Wrapper(ProtobufRestHandler delegate) { + this.delegate = Objects.requireNonNull(delegate, "ProtobufRestHandler delegate can not be null"); + } + + @Override + public String toString() { + return delegate.toString(); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception { + delegate.handleRequest(request, channel, client); + } + + @Override + public boolean canTripCircuitBreaker() { + return delegate.canTripCircuitBreaker(); + } + + @Override + public boolean supportsContentStream() { + return delegate.supportsContentStream(); + } + + @Override + public boolean allowsUnsafeBuffers() { + return delegate.allowsUnsafeBuffers(); + } + + @Override + public List<Route> routes() { + return delegate.routes(); + } + + @Override + public List<DeprecatedRoute> deprecatedRoutes() { + return delegate.deprecatedRoutes(); + } + + @Override + public List<ReplacedRoute> replacedRoutes() { + return delegate.replacedRoutes(); + } + + @Override + public boolean allowSystemIndexAccessByDefault() { + return delegate.allowSystemIndexAccessByDefault(); + } + } + + /** + * Route for the request. + * + * @opensearch.internal + */ + class Route { + + private final String path; + private final Method method; + + public Route(Method method, String path) { + this.path = path; + this.method = method; + } + + public String getPath() { + return path; + } + + public Method getMethod() { + return method; + } + } + + /** + * Represents an API that has been deprecated and is slated for removal. + */ + class DeprecatedRoute extends Route { + + private final String deprecationMessage; + + public DeprecatedRoute(Method method, String path, String deprecationMessage) { + super(method, path); + this.deprecationMessage = deprecationMessage; + } + + public String getDeprecationMessage() { + return deprecationMessage; + } + } + + /** + * Represents an API that has had its {@code path} or {@code method} changed. Holds both the + * new and previous {@code path} and {@code method} combination.
+ */ + class ReplacedRoute extends Route { + + private final String deprecatedPath; + private final Method deprecatedMethod; + + /** + * Construct replaced routes using new and deprecated methods and new and deprecated paths + * @param method route method + * @param path new route path + * @param deprecatedMethod deprecated method + * @param deprecatedPath deprecated path + */ + public ReplacedRoute(Method method, String path, Method deprecatedMethod, String deprecatedPath) { + super(method, path); + this.deprecatedMethod = deprecatedMethod; + this.deprecatedPath = deprecatedPath; + } + + /** + * Construct replaced routes using route method, new and deprecated paths + * This constructor can be used when both new and deprecated paths use the same method + * @param method route method + * @param path new route path + * @param deprecatedPath deprecated path + */ + public ReplacedRoute(Method method, String path, String deprecatedPath) { + this(method, path, method, deprecatedPath); + } + + /** + * Construct replaced routes using route, new and deprecated prefixes + * @param route route + * @param prefix new route prefix + * @param deprecatedPrefix deprecated prefix + */ + public ReplacedRoute(Route route, String prefix, String deprecatedPrefix) { + this(route.getMethod(), prefix + route.getPath(), deprecatedPrefix + route.getPath()); + } + + public String getDeprecatedPath() { + return deprecatedPath; + } + + public Method getDeprecatedMethod() { + return deprecatedMethod; + } + } + + /** + * Construct replaced routes using routes template and prefixes for new and deprecated paths + * @param routes routes + * @param prefix new prefix + * @param deprecatedPrefix deprecated prefix + * @return new list of API routes prefixed with the prefix string + */ + static List<ReplacedRoute> replaceRoutes(List<Route> routes, final String prefix, final String deprecatedPrefix) { + return routes.stream().map(route -> new ReplacedRoute(route, prefix, deprecatedPrefix)).collect(Collectors.toList()); + } +} diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 7832649e8ad32..5b1583f260da0 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -47,7 +47,7 @@ * @opensearch.api */ @FunctionalInterface -public interface RestHandler { +public interface RestHandler extends ClientAgnosticRestHandler<NodeClient> { /** * Handles a rest request. diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java new file mode 100644 index 0000000000000..55653f5e738c9 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java @@ -0,0 +1,78 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license.
+*/ + +package org.opensearch.rest.action.cat; + +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.common.Table; +import org.opensearch.common.io.Streams; +import org.opensearch.common.io.UTF8StreamWriter; +import org.opensearch.common.io.stream.BytesStream; +import org.opensearch.rest.ProtobufBaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestStatus; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.opensearch.rest.action.cat.RestTable.buildHelpWidths; +import static org.opensearch.rest.action.cat.RestTable.pad; + +/** + * Base Transport action class for _cat API +* +* @opensearch.api +*/ +public abstract class ProtobufAbstractCatAction extends ProtobufBaseRestHandler { + + protected abstract RestChannelConsumer doCatRequest(RestRequest request, ProtobufNodeClient client); + + protected abstract void documentation(StringBuilder sb); + + protected abstract Table getTableWithHeader(RestRequest request); + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final ProtobufNodeClient client) throws IOException { + boolean helpWanted = request.paramAsBoolean("help", false); + if (helpWanted) { + return channel -> { + Table table = getTableWithHeader(request); + int[] width = buildHelpWidths(table, request); + BytesStream bytesOutput = Streams.flushOnCloseStream(channel.bytesOutput()); + UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOutput); + for (Table.Cell cell : table.getHeaders()) { + // need to do left-align always, so create new cells + pad(new Table.Cell(cell.value), width[0], request, out); + out.append(" | "); + pad(new Table.Cell(cell.attr.containsKey("alias") ? cell.attr.get("alias") : ""), width[1], request, out); + out.append(" | "); + pad(new Table.Cell(cell.attr.containsKey("desc") ? cell.attr.get("desc") : "not available"), width[2], request, out); + out.append("\n"); + } + out.close(); + channel.sendResponse(new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, bytesOutput.bytes())); + }; + } else { + return doCatRequest(request, client); + } + } + + static Set<String> RESPONSE_PARAMS = Collections.unmodifiableSet( + new HashSet<>(Arrays.asList("format", "h", "v", "ts", "pri", "bytes", "size", "time", "s", "timeout")) + ); + + @Override + protected Set<String> responseParams() { + return RESPONSE_PARAMS; + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java new file mode 100644 index 0000000000000..ae0924641a16a --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java @@ -0,0 +1,545 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license.
+*/ + +package org.opensearch.rest.action.cat; + +import org.opensearch.action.admin.cluster.node.info.ProtobufNodeInfo; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodeStats; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsResponse; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.ProtobufDiscoveryNodes; +import org.opensearch.common.Strings; +import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.http.ProtobufHttpInfo; +import org.opensearch.index.cache.query.ProtobufQueryCacheStats; +import org.opensearch.index.cache.request.ProtobufRequestCacheStats; +import org.opensearch.index.engine.ProtobufSegmentsStats; +import org.opensearch.index.fielddata.ProtobufFieldDataStats; +import org.opensearch.index.flush.ProtobufFlushStats; +import org.opensearch.index.get.ProtobufGetStats; +import org.opensearch.index.merge.ProtobufMergeStats; +import org.opensearch.index.refresh.ProtobufRefreshStats; +import org.opensearch.index.search.stats.ProtobufSearchStats; +import org.opensearch.index.shard.ProtobufIndexingStats; +import org.opensearch.indices.ProtobufNodeIndicesStats; +import org.opensearch.monitor.fs.ProtobufFsInfo; +import org.opensearch.monitor.jvm.ProtobufJvmInfo; +import org.opensearch.monitor.jvm.ProtobufJvmStats; +import org.opensearch.monitor.os.ProtobufOsStats; +import org.opensearch.monitor.process.ProtobufProcessInfo; +import org.opensearch.monitor.process.ProtobufProcessStats; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.RestActionListener; +import org.opensearch.rest.action.RestResponseListener; +import org.opensearch.script.ProtobufScriptStats; +import org.opensearch.search.suggest.completion.ProtobufCompletionStats; + +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * _cat API action to get node information +* +* @opensearch.api +*/ +public class ProtobufRestNodesAction extends ProtobufAbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ProtobufRestNodesAction.class); + static final String LOCAL_DEPRECATED_MESSAGE = "Deprecated parameter [local] used. This parameter does not cause this API to act " + + "locally, and should not be used. 
It will be unsupported in version 8.0."; + + @Override + public List routes() { + return singletonList(new Route(GET, "/_cat/nodes")); + } + + @Override + public String getName() { + return "cat_nodes_action"; + } + + @Override + protected void documentation(StringBuilder sb) { + sb.append("/_cat/nodes\n"); + } + + @Override + public RestChannelConsumer doCatRequest(final RestRequest request, final ProtobufNodeClient client) { + final ProtobufClusterStateRequest clusterStateRequest = new ProtobufClusterStateRequest(); + clusterStateRequest.clear().nodes(true); + if (request.hasParam("local")) { + deprecationLogger.deprecate("cat_nodes_local_parameter", LOCAL_DEPRECATED_MESSAGE); + } + clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); + clusterStateRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", clusterStateRequest.clusterManagerNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); + final boolean fullId = request.paramAsBoolean("full_id", false); + return channel -> client.admin() + .cluster() + .state(clusterStateRequest, new RestActionListener(channel) { + @Override + public void processResponse(final ProtobufClusterStateResponse clusterStateResponse) { + ProtobufNodesInfoRequest nodesInfoRequest = new ProtobufNodesInfoRequest(); + nodesInfoRequest.timeout(request.param("timeout")); + nodesInfoRequest.clear() + .addMetrics( + ProtobufNodesInfoRequest.Metric.JVM.metricName(), + ProtobufNodesInfoRequest.Metric.OS.metricName(), + ProtobufNodesInfoRequest.Metric.PROCESS.metricName(), + ProtobufNodesInfoRequest.Metric.HTTP.metricName() + ); + client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener(channel) { + @Override + public void processResponse(final ProtobufNodesInfoResponse nodesInfoResponse) { + ProtobufNodesStatsRequest nodesStatsRequest = new ProtobufNodesStatsRequest(); + nodesStatsRequest.timeout(request.param("timeout")); + nodesStatsRequest.clear() + .indices(true) + .addMetrics( + ProtobufNodesStatsRequest.Metric.JVM.metricName(), + ProtobufNodesStatsRequest.Metric.OS.metricName(), + ProtobufNodesStatsRequest.Metric.FS.metricName(), + ProtobufNodesStatsRequest.Metric.PROCESS.metricName(), + ProtobufNodesStatsRequest.Metric.SCRIPT.metricName() + ); + client.admin() + .cluster() + .nodesStats(nodesStatsRequest, new RestResponseListener(channel) { + @Override + public RestResponse buildResponse(ProtobufNodesStatsResponse nodesStatsResponse) throws Exception { + return RestTable.buildResponse( + buildTable(fullId, request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), + channel + ); + } + }); + } + }); + } + }); + } + + @Override + protected Table getTableWithHeader(final RestRequest request) { + Table table = new Table(); + table.startHeaders(); + table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id"); + table.addCell("pid", "default:false;alias:p;desc:process id"); + table.addCell("ip", "alias:i;desc:ip address"); + table.addCell("port", "default:false;alias:po;desc:bound transport port"); + table.addCell("http_address", "default:false;alias:http;desc:bound http address"); + + table.addCell("version", "default:false;alias:v;desc:es version"); + table.addCell("type", "default:false;alias:t;desc:es distribution type"); + table.addCell("build", "default:false;alias:b;desc:es build hash"); + table.addCell("jdk", "default:false;alias:j;desc:jdk version"); + table.addCell("disk.total", 
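+        // Each cell attribute string below is a semicolon-separated list of key:value
+        // pairs: "default:false" hides the column unless it is requested via "h=",
+        // "alias" lists comma-separated short names accepted by "h=", "text-align"
+        // controls padding, and "desc" is what the "help" parameter prints (see the
+        // help rendering in ProtobufAbstractCatAction.prepareRequest above).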
"default:false;alias:dt,diskTotal;text-align:right;desc:total disk space"); + table.addCell("disk.used", "default:false;alias:du,diskUsed;text-align:right;desc:used disk space"); + table.addCell("disk.avail", "default:false;alias:d,da,disk,diskAvail;text-align:right;desc:available disk space"); + table.addCell("disk.used_percent", "default:false;alias:dup,diskUsedPercent;text-align:right;desc:used disk space percentage"); + table.addCell("heap.current", "default:false;alias:hc,heapCurrent;text-align:right;desc:used heap"); + table.addCell("heap.percent", "alias:hp,heapPercent;text-align:right;desc:used heap ratio"); + table.addCell("heap.max", "default:false;alias:hm,heapMax;text-align:right;desc:max configured heap"); + table.addCell("ram.current", "default:false;alias:rc,ramCurrent;text-align:right;desc:used machine memory"); + table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio"); + table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory"); + table.addCell("file_desc.current", "default:false;alias:fdc,fileDescriptorCurrent;text-align:right;desc:used file descriptors"); + table.addCell( + "file_desc.percent", + "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio" + ); + table.addCell("file_desc.max", "default:false;alias:fdm,fileDescriptorMax;text-align:right;desc:max file descriptors"); + + table.addCell("cpu", "alias:cpu;text-align:right;desc:recent cpu usage"); + table.addCell("load_1m", "alias:l;text-align:right;desc:1m load avg"); + table.addCell("load_5m", "alias:l;text-align:right;desc:5m load avg"); + table.addCell("load_15m", "alias:l;text-align:right;desc:15m load avg"); + table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime"); + // TODO: Deprecate "node.role", use "node.roles" which shows full node role names + table.addCell( + "node.role", + "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only" + ); + table.addCell("node.roles", "alias:rs,all roles;desc: -:coordinating node only"); + // TODO: Remove the header alias 'master', after removing MASTER_ROLE. It's added for compatibility when using parameter 'h=master'. 
+ table.addCell("cluster_manager", "alias:cm,m,master;desc:*:current cluster manager"); + table.addCell("name", "alias:n;desc:node name"); + + table.addCell("completion.size", "alias:cs,completionSize;default:false;text-align:right;desc:size of completion"); + + table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache"); + table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); + + table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); + table.addCell("query_cache.hit_count", "alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit counts"); + table.addCell( + "query_cache.miss_count", + "alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss counts" + ); + + table.addCell("request_cache.memory_size", "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"); + table.addCell( + "request_cache.evictions", + "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions" + ); + table.addCell( + "request_cache.hit_count", + "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts" + ); + table.addCell( + "request_cache.miss_count", + "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts" + ); + + table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); + table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush"); + + table.addCell("get.current", "alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops"); + table.addCell("get.time", "alias:gti,getTime;default:false;text-align:right;desc:time spent in get"); + table.addCell("get.total", "alias:gto,getTotal;default:false;text-align:right;desc:number of get ops"); + table.addCell("get.exists_time", "alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets"); + table.addCell("get.exists_total", "alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets"); + table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets"); + table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets"); + + table.addCell( + "indexing.delete_current", + "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions" + ); + table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions"); + table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops"); + table.addCell( + "indexing.index_current", + "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops" + ); + table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing"); + table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops"); + 
table.addCell( + "indexing.index_failed", + "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops" + ); + + table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges"); + table.addCell( + "merges.current_docs", + "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs" + ); + table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges"); + table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops"); + table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged"); + table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); + table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); + + table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); + table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); + table.addCell("refresh.external_total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total external refreshes"); + table.addCell( + "refresh.external_time", + "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in external refreshes" + ); + table.addCell( + "refresh.listeners", + "alias:rli,refreshListeners;default:false;text-align:right;" + "desc:number of pending refresh listeners" + ); + + table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations"); + table.addCell( + "script.cache_evictions", + "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions" + ); + table.addCell( + "script.compilation_limit_triggered", + "alias:scrclt,scriptCacheCompilationLimitTriggered;default:false;" + + "text-align:right;desc:script cache compilation limit triggered" + ); + + table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops"); + table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase"); + table.addCell("search.fetch_total", "alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops"); + table.addCell("search.open_contexts", "alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts"); + table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); + table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); + table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); + table.addCell( + "search.scroll_time", + "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open" + ); + table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + + table.addCell( + "search.point_in_time_current", + 
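+            // note: the "scc"/"scti"/"scto" aliases on the point-in-time columns below
+            // collide with the scroll column aliases declared above; this mirrors the
+            // non-protobuf RestNodesAction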
"alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + + table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); + table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); + table.addCell( + "segments.index_writer_memory", + "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer" + ); + table.addCell( + "segments.version_map_memory", + "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map" + ); + table.addCell( + "segments.fixed_bitset_memory", + "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types" + + " and type filters for types referred in _parent fields" + ); + + table.addCell("suggest.current", "alias:suc,suggestCurrent;default:false;text-align:right;desc:number of current suggest ops"); + table.addCell("suggest.time", "alias:suti,suggestTime;default:false;text-align:right;desc:time spend in suggest"); + table.addCell("suggest.total", "alias:suto,suggestTotal;default:false;text-align:right;desc:number of suggest ops"); + + table.endHeaders(); + return table; + } + + Table buildTable( + boolean fullId, + RestRequest req, + ProtobufClusterStateResponse state, + ProtobufNodesInfoResponse nodesInfo, + ProtobufNodesStatsResponse nodesStats + ) { + + ProtobufDiscoveryNodes nodes = state.getState().nodes(); + String clusterManagerId = nodes.getClusterManagerNodeId(); + Table table = getTableWithHeader(req); + + for (ProtobufDiscoveryNode node : nodes) { + ProtobufNodeInfo info = nodesInfo.getNodesMap().get(node.getId()); + ProtobufNodeStats stats = nodesStats.getNodesMap().get(node.getId()); + + ProtobufJvmInfo jvmInfo = info == null ? null : info.getInfo(ProtobufJvmInfo.class); + ProtobufJvmStats jvmStats = stats == null ? null : stats.getJvm(); + ProtobufFsInfo fsInfo = stats == null ? null : stats.getFs(); + ProtobufOsStats osStats = stats == null ? null : stats.getOs(); + ProtobufProcessStats processStats = stats == null ? null : stats.getProcess(); + ProtobufNodeIndicesStats indicesStats = stats == null ? null : stats.getIndices(); + + table.startRow(); + + table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4)); + table.addCell(info == null ? null : info.getInfo(ProtobufProcessInfo.class).getId()); + table.addCell(node.getHostAddress()); + table.addCell(node.getAddress().address().getPort()); + final ProtobufHttpInfo httpInfo = info == null ? null : info.getInfo(ProtobufHttpInfo.class); + if (httpInfo != null) { + ProtobufTransportAddress transportAddress = httpInfo.getAddress().publishAddress(); + table.addCell(NetworkAddress.format(transportAddress.address())); + } else { + table.addCell("-"); + } + + table.addCell(node.getVersion().toString()); + table.addCell(info == null ? null : info.getBuild().type().displayName()); + table.addCell(info == null ? null : info.getBuild().hash()); + table.addCell(jvmInfo == null ? 
null : jvmInfo.version()); + + ByteSizeValue diskTotal = null; + ByteSizeValue diskUsed = null; + ByteSizeValue diskAvailable = null; + String diskUsedPercent = null; + if (fsInfo != null) { + diskTotal = fsInfo.getTotal().getTotal(); + diskAvailable = fsInfo.getTotal().getAvailable(); + diskUsed = new ByteSizeValue(diskTotal.getBytes() - diskAvailable.getBytes()); + + double diskUsedRatio = diskTotal.getBytes() == 0 ? 1.0 : (double) diskUsed.getBytes() / diskTotal.getBytes(); + diskUsedPercent = String.format(Locale.ROOT, "%.2f", 100.0 * diskUsedRatio); + } + table.addCell(diskTotal); + table.addCell(diskUsed); + table.addCell(diskAvailable); + table.addCell(diskUsedPercent); + + table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsed()); + table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsedPercent()); + table.addCell(jvmInfo == null ? null : jvmInfo.getMem().getHeapMax()); + table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsed()); + table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsedPercent()); + table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getTotal()); + table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors()); + table.addCell( + processStats == null + ? null + : calculatePercentage(processStats.getOpenFileDescriptors(), processStats.getMaxFileDescriptors()) + ); + table.addCell(processStats == null ? null : processStats.getMaxFileDescriptors()); + + table.addCell(osStats == null ? null : Short.toString(osStats.getCpu().getPercent())); + boolean hasLoadAverage = osStats != null && osStats.getCpu().getLoadAverage() != null; + table.addCell( + !hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 + ? null + : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]) + ); + table.addCell( + !hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 + ? null + : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]) + ); + table.addCell( + !hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 + ? null + : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]) + ); + table.addCell(jvmStats == null ? null : jvmStats.getUptime()); + + final String roles; + final String allRoles; + if (node.getRoles().isEmpty()) { + roles = "-"; + allRoles = "-"; + } else { + List knownNodeRoles = node.getRoles() + .stream() + .filter(DiscoveryNodeRole::isKnownRole) + .collect(Collectors.toList()); + roles = knownNodeRoles.size() > 0 + ? knownNodeRoles.stream().map(DiscoveryNodeRole::roleNameAbbreviation).sorted().collect(Collectors.joining()) + : "-"; + allRoles = node.getRoles().stream().map(DiscoveryNodeRole::roleName).sorted().collect(Collectors.joining(",")); + } + table.addCell(roles); + table.addCell(allRoles); + table.addCell(clusterManagerId == null ? "x" : clusterManagerId.equals(node.getId()) ? "*" : "-"); + table.addCell(node.getName()); + + ProtobufCompletionStats completionStats = indicesStats == null ? null : stats.getIndices().getCompletion(); + table.addCell(completionStats == null ? null : completionStats.getSize()); + + ProtobufFieldDataStats fdStats = indicesStats == null ? null : stats.getIndices().getFieldData(); + table.addCell(fdStats == null ? null : fdStats.getMemorySize()); + table.addCell(fdStats == null ? null : fdStats.getEvictions()); + + ProtobufQueryCacheStats fcStats = indicesStats == null ? 
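+            // naming quirk inherited from the non-protobuf RestNodesAction: "fcStats"
+            // holds the query cache stats while "qcStats" below holds the request cache stats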
null : indicesStats.getQueryCache(); + table.addCell(fcStats == null ? null : fcStats.getMemorySize()); + table.addCell(fcStats == null ? null : fcStats.getEvictions()); + table.addCell(fcStats == null ? null : fcStats.getHitCount()); + table.addCell(fcStats == null ? null : fcStats.getMissCount()); + + ProtobufRequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getRequestCache(); + table.addCell(qcStats == null ? null : qcStats.getMemorySize()); + table.addCell(qcStats == null ? null : qcStats.getEvictions()); + table.addCell(qcStats == null ? null : qcStats.getHitCount()); + table.addCell(qcStats == null ? null : qcStats.getMissCount()); + + ProtobufFlushStats flushStats = indicesStats == null ? null : indicesStats.getFlush(); + table.addCell(flushStats == null ? null : flushStats.getTotal()); + table.addCell(flushStats == null ? null : flushStats.getTotalTime()); + + ProtobufGetStats getStats = indicesStats == null ? null : indicesStats.getGet(); + table.addCell(getStats == null ? null : getStats.current()); + table.addCell(getStats == null ? null : getStats.getTime()); + table.addCell(getStats == null ? null : getStats.getCount()); + table.addCell(getStats == null ? null : getStats.getExistsTime()); + table.addCell(getStats == null ? null : getStats.getExistsCount()); + table.addCell(getStats == null ? null : getStats.getMissingTime()); + table.addCell(getStats == null ? null : getStats.getMissingCount()); + + ProtobufIndexingStats indexingStats = indicesStats == null ? null : indicesStats.getIndexing(); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCurrent()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteTime()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCount()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCurrent()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexTime()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCount()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexFailedCount()); + + ProtobufMergeStats mergeStats = indicesStats == null ? null : indicesStats.getMerge(); + table.addCell(mergeStats == null ? null : mergeStats.getCurrent()); + table.addCell(mergeStats == null ? null : mergeStats.getCurrentNumDocs()); + table.addCell(mergeStats == null ? null : mergeStats.getCurrentSize()); + table.addCell(mergeStats == null ? null : mergeStats.getTotal()); + table.addCell(mergeStats == null ? null : mergeStats.getTotalNumDocs()); + table.addCell(mergeStats == null ? null : mergeStats.getTotalSize()); + table.addCell(mergeStats == null ? null : mergeStats.getTotalTime()); + + ProtobufRefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh(); + table.addCell(refreshStats == null ? null : refreshStats.getTotal()); + table.addCell(refreshStats == null ? null : refreshStats.getTotalTime()); + table.addCell(refreshStats == null ? null : refreshStats.getExternalTotal()); + table.addCell(refreshStats == null ? null : refreshStats.getExternalTotalTime()); + table.addCell(refreshStats == null ? null : refreshStats.getListeners()); + + ProtobufScriptStats scriptStats = stats == null ? null : stats.getScriptStats(); + table.addCell(scriptStats == null ? null : scriptStats.getCompilations()); + table.addCell(scriptStats == null ? 
null : scriptStats.getCacheEvictions()); + table.addCell(scriptStats == null ? null : scriptStats.getCompilationLimitTriggered()); + + ProtobufSearchStats searchStats = indicesStats == null ? null : indicesStats.getSearch(); + table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCount()); + table.addCell(searchStats == null ? null : searchStats.getOpenContexts()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCount()); + + ProtobufSegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments(); + table.addCell(segmentsStats == null ? null : segmentsStats.getCount()); + table.addCell(segmentsStats == null ? null : segmentsStats.getZeroMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory()); + + table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestCount()); + + table.endRow(); + } + + return table; + } + + /** + * Calculate the percentage of {@code used} from the {@code max} number. + * @param used The currently used number. + * @param max The maximum number. + * @return 0 if {@code max} is <= 0. Otherwise 100 * {@code used} / {@code max}. + */ + private short calculatePercentage(long used, long max) { + return max <= 0 ? 
0 : (short) ((100d * used) / max); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 19967097e78b9..b54c8955283a2 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -35,16 +35,11 @@ import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.opensearch.action.admin.cluster.node.info.ProtobufNodeInfo; -import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest; -import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; -import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; -import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -70,7 +65,6 @@ import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.monitor.jvm.JvmStats; -import org.opensearch.monitor.jvm.ProtobufJvmInfo; import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessInfo; import org.opensearch.monitor.process.ProcessStats; @@ -115,7 +109,7 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { - final ProtobufClusterStateRequest clusterStateRequest = new ProtobufClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); if (request.hasParam("local")) { deprecationLogger.deprecate("cat_nodes_local_parameter", LOCAL_DEPRECATED_MESSAGE); @@ -124,23 +118,23 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli clusterStateRequest.clusterManagerNodeTimeout( request.paramAsTime("cluster_manager_timeout", clusterStateRequest.clusterManagerNodeTimeout()) ); - parseDeprecatedMasterTimeoutParameterProtobuf(clusterStateRequest, request, deprecationLogger, getName()); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); final boolean fullId = request.paramAsBoolean("full_id", false); - return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { + return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override - public void processResponse(final ProtobufClusterStateResponse clusterStateResponse) { - ProtobufNodesInfoRequest nodesInfoRequest = new ProtobufNodesInfoRequest(); + public void processResponse(final ClusterStateResponse clusterStateResponse) { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.timeout(request.param("timeout")); nodesInfoRequest.clear() .addMetrics( - 
ProtobufNodesInfoRequest.Metric.JVM.metricName(), - ProtobufNodesInfoRequest.Metric.OS.metricName(), - ProtobufNodesInfoRequest.Metric.PROCESS.metricName(), - ProtobufNodesInfoRequest.Metric.HTTP.metricName() + NodesInfoRequest.Metric.JVM.metricName(), + NodesInfoRequest.Metric.OS.metricName(), + NodesInfoRequest.Metric.PROCESS.metricName(), + NodesInfoRequest.Metric.HTTP.metricName() ); - client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener(channel) { + client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener(channel) { @Override - public void processResponse(final ProtobufNodesInfoResponse nodesInfoResponse) { + public void processResponse(final NodesInfoResponse nodesInfoResponse) { NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); nodesStatsRequest.timeout(request.param("timeout")); nodesStatsRequest.clear() @@ -356,8 +350,8 @@ protected Table getTableWithHeader(final RestRequest request) { Table buildTable( boolean fullId, RestRequest req, - ProtobufClusterStateResponse state, - ProtobufNodesInfoResponse nodesInfo, + ClusterStateResponse state, + NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats ) { @@ -366,10 +360,10 @@ Table buildTable( Table table = getTableWithHeader(req); for (DiscoveryNode node : nodes) { - ProtobufNodeInfo info = nodesInfo.getNodesMap().get(node.getId()); + NodeInfo info = nodesInfo.getNodesMap().get(node.getId()); NodeStats stats = nodesStats.getNodesMap().get(node.getId()); - ProtobufJvmInfo jvmInfo = info == null ? null : info.getInfo(ProtobufJvmInfo.class); + JvmInfo jvmInfo = info == null ? null : info.getInfo(JvmInfo.class); JvmStats jvmStats = stats == null ? null : stats.getJvm(); FsInfo fsInfo = stats == null ? null : stats.getFs(); OsStats osStats = stats == null ? null : stats.getOs(); diff --git a/server/src/main/java/org/opensearch/script/ProtobufScriptCacheStats.java b/server/src/main/java/org/opensearch/script/ProtobufScriptCacheStats.java new file mode 100644 index 0000000000000..cf511b4bee309 --- /dev/null +++ b/server/src/main/java/org/opensearch/script/ProtobufScriptCacheStats.java @@ -0,0 +1,152 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.script; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Stats for script caching +* +* @opensearch.internal +* +* @deprecated This class is deprecated in favor of ProtobufScriptStats and ScriptContextStats. It is removed in OpenSearch 2.0. 
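+* <p>
+* Wire format sketch (hand-rolled framing, not a generated protobuf message): a leading
+* bool selects the variant; {@code false} is followed by a single general
+* {@code ProtobufScriptStats}, {@code true} by an int32 count and that many
+* (string name, {@code ProtobufScriptStats}) pairs.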
+*/
+@Deprecated
+public class ProtobufScriptCacheStats implements ProtobufWriteable, ToXContentFragment {
+    private final Map<String, ProtobufScriptStats> context;
+    private final ProtobufScriptStats general;
+
+    public ProtobufScriptCacheStats(Map<String, ProtobufScriptStats> context) {
+        this.context = Collections.unmodifiableMap(context);
+        this.general = null;
+    }
+
+    public ProtobufScriptCacheStats(ProtobufScriptStats general) {
+        this.general = Objects.requireNonNull(general);
+        this.context = null;
+    }
+
+    public ProtobufScriptCacheStats(CodedInputStream in) throws IOException {
+        boolean isContext = in.readBool();
+        if (isContext == false) {
+            general = new ProtobufScriptStats(in);
+            context = null;
+            return;
+        }
+
+        general = null;
+        int size = in.readInt32();
+        Map<String, ProtobufScriptStats> context = new HashMap<>(size);
+        for (int i = 0; i < size; i++) {
+            String name = in.readString();
+            context.put(name, new ProtobufScriptStats(in));
+        }
+        this.context = Collections.unmodifiableMap(context);
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        if (general != null) {
+            out.writeBoolNoTag(false);
+            general.writeTo(out);
+            return;
+        }
+
+        out.writeBoolNoTag(true);
+        out.writeInt32NoTag(context.size());
+        for (String name : context.keySet().stream().sorted().collect(Collectors.toList())) {
+            out.writeStringNoTag(name);
+            context.get(name).writeTo(out);
+        }
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(Fields.SCRIPT_CACHE_STATS);
+        builder.startObject(Fields.SUM);
+        if (general != null) {
+            builder.field(ProtobufScriptStats.Fields.COMPILATIONS, general.getCompilations());
+            builder.field(ProtobufScriptStats.Fields.CACHE_EVICTIONS, general.getCacheEvictions());
+            builder.field(ProtobufScriptStats.Fields.COMPILATION_LIMIT_TRIGGERED, general.getCompilationLimitTriggered());
+            builder.endObject().endObject();
+            return builder;
+        }
+
+        ProtobufScriptStats sum = sum();
+        builder.field(ProtobufScriptStats.Fields.COMPILATIONS, sum.getCompilations());
+        builder.field(ProtobufScriptStats.Fields.CACHE_EVICTIONS, sum.getCacheEvictions());
+        builder.field(ProtobufScriptStats.Fields.COMPILATION_LIMIT_TRIGGERED, sum.getCompilationLimitTriggered());
+        builder.endObject();
+
+        builder.startArray(Fields.CONTEXTS);
+        for (String name : context.keySet().stream().sorted().collect(Collectors.toList())) {
+            ProtobufScriptStats stats = context.get(name);
+            builder.startObject();
+            builder.field(Fields.CONTEXT, name);
+            builder.field(ProtobufScriptStats.Fields.COMPILATIONS, stats.getCompilations());
+            builder.field(ProtobufScriptStats.Fields.CACHE_EVICTIONS, stats.getCacheEvictions());
+            builder.field(ProtobufScriptStats.Fields.COMPILATION_LIMIT_TRIGGERED, stats.getCompilationLimitTriggered());
+            builder.endObject();
+        }
+        builder.endArray();
+        builder.endObject();
+
+        return builder;
+    }
+
+    /**
+     * Get the context specific stats, null if using general cache
+     */
+    public Map<String, ProtobufScriptStats> getContextStats() {
+        return context;
+    }
+
+    /**
+     * Get the general stats, null if using context cache
+     */
+    public ProtobufScriptStats getGeneralStats() {
+        return general;
+    }
+
+    /**
+     * The sum of all script stats: the general stats when present, otherwise the sum over all context stats.
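+     * <p>
+     * A minimal illustration (hypothetical names and values):
+     * <pre>
+     * // two context entries with 3 and 5 compilations respectively
+     * ProtobufScriptStats summed = cacheStats.sum();
+     * long compilations = summed.getCompilations(); // 8
+     * </pre>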
+ */ + public ProtobufScriptStats sum() { + if (general != null) { + return general; + } + long compilations = 0; + long cacheEvictions = 0; + long compilationLimitTriggered = 0; + for (ProtobufScriptStats stat : context.values()) { + compilations += stat.getCompilations(); + cacheEvictions += stat.getCacheEvictions(); + compilationLimitTriggered += stat.getCompilationLimitTriggered(); + } + return new ProtobufScriptStats(compilations, cacheEvictions, compilationLimitTriggered); + } + + static final class Fields { + static final String SCRIPT_CACHE_STATS = "script_cache"; + static final String CONTEXT = "context"; + static final String SUM = "sum"; + static final String CONTEXTS = "contexts"; + } +} diff --git a/server/src/main/java/org/opensearch/script/ProtobufScriptContextStats.java b/server/src/main/java/org/opensearch/script/ProtobufScriptContextStats.java new file mode 100644 index 0000000000000..52fe4f1bed339 --- /dev/null +++ b/server/src/main/java/org/opensearch/script/ProtobufScriptContextStats.java @@ -0,0 +1,91 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.script; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Stats for a script context +* +* @opensearch.internal +*/ +public class ProtobufScriptContextStats implements ProtobufWriteable, ToXContentFragment, Comparable { + private final String context; + private final long compilations; + private final long cacheEvictions; + private final long compilationLimitTriggered; + + public ProtobufScriptContextStats(String context, long compilations, long cacheEvictions, long compilationLimitTriggered) { + this.context = Objects.requireNonNull(context); + this.compilations = compilations; + this.cacheEvictions = cacheEvictions; + this.compilationLimitTriggered = compilationLimitTriggered; + } + + public ProtobufScriptContextStats(CodedInputStream in) throws IOException { + context = in.readString(); + compilations = in.readInt64(); + cacheEvictions = in.readInt64(); + compilationLimitTriggered = in.readInt64(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeStringNoTag(context); + out.writeInt64NoTag(compilations); + out.writeInt64NoTag(cacheEvictions); + out.writeInt64NoTag(compilationLimitTriggered); + } + + public String getContext() { + return context; + } + + public long getCompilations() { + return compilations; + } + + public long getCacheEvictions() { + return cacheEvictions; + } + + public long getCompilationLimitTriggered() { + return compilationLimitTriggered; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Fields.CONTEXT, getContext()); + builder.field(Fields.COMPILATIONS, getCompilations()); + builder.field(Fields.CACHE_EVICTIONS, getCacheEvictions()); + builder.field(Fields.COMPILATION_LIMIT_TRIGGERED, getCompilationLimitTriggered()); + builder.endObject(); + return builder; + } + + @Override + public int compareTo(ProtobufScriptContextStats o) { + return this.context.compareTo(o.context); + } + + static 
final class Fields {
+        static final String CONTEXT = "context";
+        static final String COMPILATIONS = "compilations";
+        static final String CACHE_EVICTIONS = "cache_evictions";
+        static final String COMPILATION_LIMIT_TRIGGERED = "compilation_limit_triggered";
+    }
+}
diff --git a/server/src/main/java/org/opensearch/script/ProtobufScriptStats.java b/server/src/main/java/org/opensearch/script/ProtobufScriptStats.java
new file mode 100644
index 0000000000000..c0366f94748ee
--- /dev/null
+++ b/server/src/main/java/org/opensearch/script/ProtobufScriptStats.java
@@ -0,0 +1,127 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.script;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Stats for scripts
+*
+* @opensearch.internal
+*/
+public class ProtobufScriptStats implements ProtobufWriteable, ToXContentFragment {
+    private final List<ProtobufScriptContextStats> contextStats;
+    private final long compilations;
+    private final long cacheEvictions;
+    private final long compilationLimitTriggered;
+
+    public ProtobufScriptStats(List<ProtobufScriptContextStats> contextStats) {
+        ArrayList<ProtobufScriptContextStats> ctxStats = new ArrayList<>(contextStats.size());
+        ctxStats.addAll(contextStats);
+        ctxStats.sort(ProtobufScriptContextStats::compareTo);
+        this.contextStats = Collections.unmodifiableList(ctxStats);
+        long compilations = 0;
+        long cacheEvictions = 0;
+        long compilationLimitTriggered = 0;
+        for (ProtobufScriptContextStats stats : contextStats) {
+            compilations += stats.getCompilations();
+            cacheEvictions += stats.getCacheEvictions();
+            compilationLimitTriggered += stats.getCompilationLimitTriggered();
+        }
+        this.compilations = compilations;
+        this.cacheEvictions = cacheEvictions;
+        this.compilationLimitTriggered = compilationLimitTriggered;
+    }
+
+    public ProtobufScriptStats(long compilations, long cacheEvictions, long compilationLimitTriggered) {
+        this.contextStats = Collections.emptyList();
+        this.compilations = compilations;
+        this.cacheEvictions = cacheEvictions;
+        this.compilationLimitTriggered = compilationLimitTriggered;
+    }
+
+    public ProtobufScriptStats(ProtobufScriptContextStats context) {
+        this(context.getCompilations(), context.getCacheEvictions(), context.getCompilationLimitTriggered());
+    }
+
+    public ProtobufScriptStats(CodedInputStream in) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
+        compilations = in.readInt64();
+        cacheEvictions = in.readInt64();
+        compilationLimitTriggered = in.readInt64();
+        contextStats = protobufStreamInput.readList(ProtobufScriptContextStats::new);
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out);
+        out.writeInt64NoTag(compilations);
+        out.writeInt64NoTag(cacheEvictions);
+        out.writeInt64NoTag(compilationLimitTriggered);
+        protobufStreamOutput.writeCollection(contextStats, (o, v) ->
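+        // assumed to mirror readList(ProtobufScriptContextStats::new) in the
+        // CodedInputStream constructor above: the helper frames the element count
+        // and then delegates to each element's writeTo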
v.writeTo(o)); + } + + public List getContextStats() { + return contextStats; + } + + public long getCompilations() { + return compilations; + } + + public long getCacheEvictions() { + return cacheEvictions; + } + + public long getCompilationLimitTriggered() { + return compilationLimitTriggered; + } + + public ProtobufScriptCacheStats toScriptCacheStats() { + if (contextStats.isEmpty()) { + return new ProtobufScriptCacheStats(this); + } + Map contexts = new HashMap<>(contextStats.size()); + for (ProtobufScriptContextStats contextStats : contextStats) { + contexts.put(contextStats.getContext(), new ProtobufScriptStats(contextStats)); + } + return new ProtobufScriptCacheStats(contexts); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.SCRIPT_STATS); + builder.field(Fields.COMPILATIONS, compilations); + builder.field(Fields.CACHE_EVICTIONS, cacheEvictions); + builder.field(Fields.COMPILATION_LIMIT_TRIGGERED, compilationLimitTriggered); + builder.endObject(); + return builder; + } + + static final class Fields { + static final String SCRIPT_STATS = "script"; + static final String CONTEXTS = "contexts"; + static final String COMPILATIONS = "compilations"; + static final String CACHE_EVICTIONS = "cache_evictions"; + static final String COMPILATION_LIMIT_TRIGGERED = "compilation_limit_triggered"; + } +} diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/ProtobufCompletionStats.java b/server/src/main/java/org/opensearch/search/suggest/completion/ProtobufCompletionStats.java new file mode 100644 index 0000000000000..cf3f6db462f16 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/suggest/completion/ProtobufCompletionStats.java @@ -0,0 +1,96 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.search.suggest.completion; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.ProtobufFieldMemoryStats; +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Stats for completion suggester +* +* @opensearch.internal +*/ +public class ProtobufCompletionStats implements ProtobufWriteable, ToXContentFragment { + + private static final String COMPLETION = "completion"; + private static final String SIZE_IN_BYTES = "size_in_bytes"; + private static final String SIZE = "size"; + private static final String FIELDS = "fields"; + + private long sizeInBytes; + @Nullable + private ProtobufFieldMemoryStats fields; + + public ProtobufCompletionStats() {} + + public ProtobufCompletionStats(CodedInputStream in) throws IOException { + sizeInBytes = in.readInt64(); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + fields = protobufStreamInput.readOptionalWriteable(ProtobufFieldMemoryStats::new); + } + + public ProtobufCompletionStats(long size, @Nullable ProtobufFieldMemoryStats fields) { + this.sizeInBytes = size; + this.fields = fields; + } + + public long getSizeInBytes() { + return sizeInBytes; + } + + public ByteSizeValue getSize() { + return new ByteSizeValue(sizeInBytes); + } + + public ProtobufFieldMemoryStats getFields() { + return fields; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt64NoTag(sizeInBytes); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeOptionalWriteable(fields); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(COMPLETION); + builder.humanReadableField(SIZE_IN_BYTES, SIZE, getSize()); + if (fields != null) { + fields.toXContent(builder, FIELDS, SIZE_IN_BYTES, SIZE); + } + builder.endObject(); + return builder; + } + + public void add(ProtobufCompletionStats completion) { + if (completion == null) { + return; + } + sizeInBytes += completion.getSizeInBytes(); + if (completion.fields != null) { + if (fields == null) { + fields = completion.fields.copy(); + } else { + fields.add(completion.fields); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java index 7236fa2ebdc2f..61e80ad7f242c 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java @@ -8,25 +8,25 @@ package org.opensearch.tasks; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchSecurityException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; -import org.opensearch.action.support.ChannelActionListener; +import 
org.opensearch.action.support.ProtobufChannelActionListener; import org.opensearch.action.support.GroupedActionListener; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.EmptyTransportResponseHandler; -import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportService; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.transport.ProtobufEmptyTransportResponseHandler; +import org.opensearch.transport.ProtobufTransportChannel; +import org.opensearch.transport.ProtobufTransportException; +import org.opensearch.transport.ProtobufTransportRequest; +import org.opensearch.transport.ProtobufTransportRequestHandler; +import org.opensearch.transport.ProtobufTransportResponse; +import org.opensearch.transport.ProtobufTransportService; import java.io.IOException; import java.util.Collection; @@ -40,15 +40,15 @@ public class ProtobufTaskCancellationService { public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban"; private static final Logger logger = LogManager.getLogger(ProtobufTaskCancellationService.class); - private final TransportService transportService; + private final ProtobufTransportService transportService; private final ProtobufTaskManager taskManager; - public ProtobufTaskCancellationService(TransportService transportService) { + public ProtobufTaskCancellationService(ProtobufTransportService transportService) { this.transportService = transportService; this.taskManager = transportService.getTaskManager(); transportService.registerRequestHandler( BAN_PARENT_ACTION_NAME, - ThreadPool.Names.SAME, + ProtobufThreadPool.Names.SAME, BanParentTaskRequest::new, new BanParentRequestHandler() ); @@ -58,13 +58,13 @@ private String localNodeId() { return transportService.getLocalNode().getId(); } - void cancelTaskAndDescendants(CancellableTask task, String reason, boolean waitForCompletion, ActionListener listener) { - final TaskId taskId = task.taskInfo(localNodeId(), false).getTaskId(); + void cancelTaskAndDescendants(ProtobufCancellableTask task, String reason, boolean waitForCompletion, ActionListener listener) { + final ProtobufTaskId taskId = task.taskInfo(localNodeId(), false).getTaskId(); if (task.shouldCancelChildrenOnCancellation()) { logger.trace("cancelling task [{}] and its descendants", taskId); StepListener completedListener = new StepListener<>(); GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.map(completedListener, r -> null), 3); - Collection childrenNodes = taskManager.startBanOnChildrenNodes(task.getId(), () -> { + Collection childrenNodes = taskManager.startBanOnChildrenNodes(task.getId(), () -> { logger.trace("child tasks of parent [{}] are completed", taskId); groupedListener.onResponse(null); }); @@ -103,35 +103,35 @@ void cancelTaskAndDescendants(CancellableTask task, String reason, boolean waitF private void setBanOnNodes( String reason, boolean waitForCompletion, - CancellableTask task, - Collection childNodes, + ProtobufCancellableTask task, + Collection childNodes, ActionListener listener ) { if 
(childNodes.isEmpty()) { listener.onResponse(null); return; } - final TaskId taskId = new TaskId(localNodeId(), task.getId()); + final ProtobufTaskId taskId = new ProtobufTaskId(localNodeId(), task.getId()); logger.trace("cancelling child tasks of [{}] on child nodes {}", taskId, childNodes); GroupedActionListener groupedListener = new GroupedActionListener<>( ActionListener.map(listener, r -> null), childNodes.size() ); final BanParentTaskRequest banRequest = BanParentTaskRequest.createSetBanParentTaskRequest(taskId, reason, waitForCompletion); - for (DiscoveryNode node : childNodes) { + for (ProtobufDiscoveryNode node : childNodes) { transportService.sendRequest( node, BAN_PARENT_ACTION_NAME, banRequest, - new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + new ProtobufEmptyTransportResponseHandler(ProtobufThreadPool.Names.SAME) { @Override - public void handleResponse(TransportResponse.Empty response) { + public void handleResponse(ProtobufTransportResponse.Empty response) { logger.trace("sent ban for tasks with the parent [{}] to the node [{}]", taskId, node); groupedListener.onResponse(null); } @Override - public void handleException(TransportException exp) { + public void handleException(ProtobufTransportException exp) { assert ExceptionsHelper.unwrapCause(exp) instanceof OpenSearchSecurityException == false; logger.warn("Cannot send ban for tasks with the parent [{}] to the node [{}]", taskId, node); groupedListener.onFailure(exp); @@ -141,72 +141,80 @@ public void handleException(TransportException exp) { } } - private void removeBanOnNodes(CancellableTask task, Collection childNodes) { - final BanParentTaskRequest request = BanParentTaskRequest.createRemoveBanParentTaskRequest(new TaskId(localNodeId(), task.getId())); - for (DiscoveryNode node : childNodes) { + private void removeBanOnNodes(ProtobufCancellableTask task, Collection childNodes) { + final BanParentTaskRequest request = BanParentTaskRequest.createRemoveBanParentTaskRequest( + new ProtobufTaskId(localNodeId(), task.getId()) + ); + for (ProtobufDiscoveryNode node : childNodes) { logger.trace("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node); - transportService.sendRequest(node, BAN_PARENT_ACTION_NAME, request, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleException(TransportException exp) { - assert ExceptionsHelper.unwrapCause(exp) instanceof OpenSearchSecurityException == false; - logger.info("failed to remove the parent ban for task {} on node {}", request.parentTaskId, node); + transportService.sendRequest( + node, + BAN_PARENT_ACTION_NAME, + request, + new ProtobufEmptyTransportResponseHandler(ProtobufThreadPool.Names.SAME) { + @Override + public void handleException(ProtobufTransportException exp) { + assert ExceptionsHelper.unwrapCause(exp) instanceof OpenSearchSecurityException == false; + logger.info("failed to remove the parent ban for task {} on node {}", request.parentTaskId, node); + } } - }); + ); } } - private static class BanParentTaskRequest extends TransportRequest { + private static class BanParentTaskRequest extends ProtobufTransportRequest { - private final TaskId parentTaskId; + private final ProtobufTaskId parentTaskId; private final boolean ban; private final boolean waitForCompletion; private final String reason; - static BanParentTaskRequest createSetBanParentTaskRequest(TaskId parentTaskId, String reason, boolean waitForCompletion) { + static BanParentTaskRequest 
createSetBanParentTaskRequest(ProtobufTaskId parentTaskId, String reason, boolean waitForCompletion) { return new BanParentTaskRequest(parentTaskId, reason, waitForCompletion); } - static BanParentTaskRequest createRemoveBanParentTaskRequest(TaskId parentTaskId) { + static BanParentTaskRequest createRemoveBanParentTaskRequest(ProtobufTaskId parentTaskId) { return new BanParentTaskRequest(parentTaskId); } - private BanParentTaskRequest(TaskId parentTaskId, String reason, boolean waitForCompletion) { + private BanParentTaskRequest(ProtobufTaskId parentTaskId, String reason, boolean waitForCompletion) { this.parentTaskId = parentTaskId; this.ban = true; this.reason = reason; this.waitForCompletion = waitForCompletion; } - private BanParentTaskRequest(TaskId parentTaskId) { + private BanParentTaskRequest(ProtobufTaskId parentTaskId) { this.parentTaskId = parentTaskId; this.ban = false; this.reason = null; this.waitForCompletion = false; } - private BanParentTaskRequest(StreamInput in) throws IOException { + private BanParentTaskRequest(CodedInputStream in) throws IOException { super(in); - parentTaskId = TaskId.readFromStream(in); - ban = in.readBoolean(); + parentTaskId = ProtobufTaskId.readFromStream(in); + ban = in.readBool(); reason = ban ? in.readString() : null; - waitForCompletion = in.readBoolean(); + waitForCompletion = in.readBool(); } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeTo(CodedOutputStream out) throws IOException { super.writeTo(out); parentTaskId.writeTo(out); - out.writeBoolean(ban); + out.writeBoolNoTag(ban); if (ban) { - out.writeString(reason); + out.writeStringNoTag(reason); } - out.writeBoolean(waitForCompletion); + out.writeBoolNoTag(waitForCompletion); } } - private class BanParentRequestHandler implements TransportRequestHandler { + private class BanParentRequestHandler implements ProtobufTransportRequestHandler { @Override - public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel, Task task) throws Exception { + public void messageReceived(final BanParentTaskRequest request, final ProtobufTransportChannel channel, ProtobufTask task) + throws Exception { if (request.ban) { logger.debug( "Received ban for the parent [{}] on the node [{}], reason: [{}]", @@ -214,22 +222,22 @@ public void messageReceived(final BanParentTaskRequest request, final TransportC localNodeId(), request.reason ); - final List childTasks = taskManager.setBan(request.parentTaskId, request.reason); + final List childTasks = taskManager.setBan(request.parentTaskId, request.reason); final GroupedActionListener listener = new GroupedActionListener<>( ActionListener.map( - new ChannelActionListener<>(channel, BAN_PARENT_ACTION_NAME, request), - r -> TransportResponse.Empty.INSTANCE + new ProtobufChannelActionListener<>(channel, BAN_PARENT_ACTION_NAME, request), + r -> ProtobufTransportResponse.Empty.INSTANCE ), childTasks.size() + 1 ); - for (CancellableTask childTask : childTasks) { + for (ProtobufCancellableTask childTask : childTasks) { cancelTaskAndDescendants(childTask, request.reason, request.waitForCompletion, listener); } listener.onResponse(null); } else { logger.debug("Removing ban for the parent [{}] on the node [{}]", request.parentTaskId, localNodeId()); taskManager.removeBan(request.parentTaskId); - channel.sendResponse(TransportResponse.Empty.INSTANCE); + channel.sendResponse(ProtobufTransportResponse.Empty.INSTANCE); } } } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java 
b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java index 71939b4be2bfb..e8e78dd9cde00 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java @@ -100,11 +100,11 @@ public ProtobufTaskInfo( */ @SuppressWarnings("unchecked") public ProtobufTaskInfo(CodedInputStream in) throws IOException { - protobufStreamInput = new ProtobufStreamInput(); + protobufStreamInput = new ProtobufStreamInput(in); taskId = ProtobufTaskId.readFromStream(in); type = in.readString(); action = in.readString(); - description = protobufStreamInput.readOptionalString(in); + description = protobufStreamInput.readOptionalString(); // TODO: fix this status = null; startTime = in.readInt64(); @@ -119,9 +119,9 @@ public ProtobufTaskInfo(CodedInputStream in) throws IOException { throw new IllegalArgumentException("task cannot be cancelled"); } parentTaskId = ProtobufTaskId.readFromStream(in); - headers = protobufStreamInput.readMap(CodedInputStream::readString, CodedInputStream::readString, in); + headers = protobufStreamInput.readMap(CodedInputStream::readString, CodedInputStream::readString); if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_1_0)) { - resourceStats = protobufStreamInput.readOptionalWriteable(ProtobufTaskResourceStats::new, in); + resourceStats = protobufStreamInput.readOptionalWriteable(ProtobufTaskResourceStats::new); } else { resourceStats = null; } @@ -129,7 +129,7 @@ public ProtobufTaskInfo(CodedInputStream in) throws IOException { @Override public void writeTo(CodedOutputStream out) throws IOException { - protobufStreamOutput = new ProtobufStreamOutput(); + protobufStreamOutput = new ProtobufStreamOutput(out); taskId.writeTo(out); out.writeStringNoTag(type); out.writeStringNoTag(action); @@ -143,9 +143,9 @@ public void writeTo(CodedOutputStream out) throws IOException { out.writeBoolNoTag(cancelled); } parentTaskId.writeTo(out); - protobufStreamOutput.writeMap(headers, CodedOutputStream::writeStringNoTag, CodedOutputStream::writeStringNoTag, out); + protobufStreamOutput.writeMap(headers, CodedOutputStream::writeStringNoTag, CodedOutputStream::writeStringNoTag); if (protobufStreamOutput.getVersion().onOrAfter(Version.V_2_1_0)) { - protobufStreamOutput.writeOptionalWriteable(resourceStats, out); + protobufStreamOutput.writeOptionalWriteable(resourceStats); } } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java new file mode 100644 index 0000000000000..67602b0f90b47 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java @@ -0,0 +1,40 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +/** + * Listener for Task success or failure. +* +* @opensearch.internal +*/ +public interface ProtobufTaskListener { + /** + * Handle task response. This response may constitute a failure or a success + * but it is up to the listener to make that decision. + * + * @param task + * the task being executed. May be null if the action doesn't + * create a task + * @param response + * the response from the action that executed the task + */ + void onResponse(ProtobufTask task, Response response); + + /** + * A failure caused by an exception at some phase of the task. 
+ * + * @param task + * the task being executed. May be null if the action doesn't + * create a task + * @param e + * the failure + */ + void onFailure(ProtobufTask task, Exception e); + +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java index 55f9213214ca5..a6e53ebabdd9e 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java @@ -22,10 +22,8 @@ import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.ProtobufClusterChangedEvent; import org.opensearch.cluster.ProtobufClusterStateApplier; -import org.opensearch.cluster.ClusterStateApplier; import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.cluster.node.ProtobufDiscoveryNodes; -import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; @@ -105,7 +103,7 @@ public class ProtobufTaskManager implements ProtobufClusterStateApplier { private final ByteSizeValue maxHeaderSize; private final Map channelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); - private final SetOnce cancellationService = new SetOnce<>(); + private final SetOnce cancellationService = new SetOnce<>(); private volatile boolean taskResourceConsumersEnabled; private final Set> taskResourceConsumer; @@ -138,7 +136,7 @@ public void setTaskResultsService(TaskResultsService taskResultsService) { this.taskResultsService = taskResultsService; } - public void setTaskCancellationService(TaskCancellationService taskCancellationService) { + public void setTaskCancellationService(ProtobufTaskCancellationService taskCancellationService) { this.cancellationService.set(taskCancellationService); } @@ -302,7 +300,11 @@ public ProtobufDiscoveryNode localNode() { /** * Stores the task failure */ - public void storeResult(ProtobufTask task, Exception error, ActionListener listener) { + public void storeResult( + ProtobufTask task, + Exception error, + ActionListener listener + ) { ProtobufDiscoveryNode localNode = lastDiscoveryNodes.getLocalNode(); if (localNode == null) { // too early to store anything, shouldn't really be here - just pass the error along @@ -334,7 +336,11 @@ public void onFailure(Exception e) { /** * Stores the task result */ - public void storeResult(ProtobufTask task, Response response, ActionListener listener) { + public void storeResult( + ProtobufTask task, + Response response, + ActionListener listener + ) { ProtobufDiscoveryNode localNode = lastDiscoveryNodes.getLocalNode(); if (localNode == null) { // too early to store anything, shouldn't really be here - just pass the response along @@ -740,13 +746,18 @@ protected void doRun() { } } - public void cancelTaskAndDescendants(ProtobufCancellableTask task, String reason, boolean waitForCompletion, ActionListener listener) { - final TaskCancellationService service = cancellationService.get(); + public void cancelTaskAndDescendants( + ProtobufCancellableTask task, + String reason, + boolean waitForCompletion, + ActionListener listener + ) { + final ProtobufTaskCancellationService service = cancellationService.get(); if (service != null) { service.cancelTaskAndDescendants(task, reason, waitForCompletion, listener); } else { - assert false : "TaskCancellationService is not initialized"; - throw new IllegalStateException("TaskCancellationService is 
not initialized"); + assert false : "ProtobufTaskCancellationService is not initialized"; + throw new IllegalStateException("ProtobufTaskCancellationService is not initialized"); } } } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java index 8b67855d2fc98..8c165e9237047 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java @@ -50,7 +50,8 @@ public class ProtobufTaskResourceTrackingService implements RunnableTaskExecutio private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections + .newConcurrentMapLongWithAggressiveConcurrency(); private final List taskCompletionListeners = new ArrayList<>(); private final ProtobufThreadPool threadPool; private volatile boolean taskResourceTrackingEnabled; diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java index d4a2402961cc1..1c3128309f00e 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java @@ -81,11 +81,11 @@ public ProtobufTaskResult(boolean completed, ProtobufTaskInfo task, @Nullable By * Read from a stream. */ public ProtobufTaskResult(CodedInputStream in) throws IOException { - protobufStreamInput = new ProtobufStreamInput(); + protobufStreamInput = new ProtobufStreamInput(in); completed = in.readBool(); task = new ProtobufTaskInfo(in); - error = protobufStreamInput.readOptionalBytesReference(in); - response = protobufStreamInput.readOptionalBytesReference(in); + error = protobufStreamInput.readOptionalBytesReference(); + response = protobufStreamInput.readOptionalBytesReference(); } @Override diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java index d48704fa5777a..c77984a5a5953 100644 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java @@ -716,7 +716,7 @@ public Info( } public Info(CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); name = in.readString(); final String typeStr = in.readString(); // Opensearch on or after 3.0.0 version doesn't know about "fixed_auto_queue_size" thread pool. Convert it to RESIZABLE. 
@@ -727,13 +727,13 @@ public Info(CodedInputStream in) throws IOException { } min = in.readInt32(); max = in.readInt32(); - keepAlive = protobufStreamInput.readOptionalTimeValue(in); - queueSize = protobufStreamInput.readOptionalWriteable(ProtobufSizeValue::new, in); + keepAlive = protobufStreamInput.readOptionalTimeValue(); + queueSize = protobufStreamInput.readOptionalWriteable(ProtobufSizeValue::new); } @Override public void writeTo(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); out.writeStringNoTag(name); if (type == ThreadPoolType.RESIZABLE && protobufStreamOutput.getVersion().before(Version.V_3_0_0)) { // Opensearch on older version doesn't know about "resizable" thread pool. Convert RESIZABLE to FIXED @@ -744,8 +744,8 @@ public void writeTo(CodedOutputStream out) throws IOException { } out.writeInt32NoTag(min); out.writeInt32NoTag(max); - protobufStreamOutput.writeOptionalTimeValue(keepAlive, out); - protobufStreamOutput.writeOptionalWriteable(queueSize, out); + protobufStreamOutput.writeOptionalTimeValue(keepAlive); + protobufStreamOutput.writeOptionalWriteable(queueSize); } public String getName() { diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java index 5188abedd67ca..f4a67d306f4bc 100644 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java @@ -34,14 +34,14 @@ public ProtobufThreadPoolInfo(List infos) { } public ProtobufThreadPoolInfo(CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(); - this.infos = Collections.unmodifiableList(protobufStreamInput.readList(ProtobufThreadPool.Info::new, in)); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + this.infos = Collections.unmodifiableList(protobufStreamInput.readList(ProtobufThreadPool.Info::new)); } @Override public void writeTo(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); - protobufStreamOutput.writeCollection(infos, (o, v) -> v.writeTo(o), out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeCollection(infos, (o, v) -> v.writeTo(o)); } @Override diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolStats.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolStats.java new file mode 100644 index 0000000000000..b4c8c4d612cd8 --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolStats.java @@ -0,0 +1,191 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.threadpool; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +/** + * Stats for a threadpool +* +* @opensearch.internal +*/ +public class ProtobufThreadPoolStats implements ProtobufWriteable, ToXContentFragment, Iterable { + + /** + * The statistics. + * + * @opensearch.internal + */ + public static class Stats implements ProtobufWriteable, ToXContentFragment, Comparable { + + private final String name; + private final int threads; + private final int queue; + private final int active; + private final long rejected; + private final int largest; + private final long completed; + + public Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed) { + this.name = name; + this.threads = threads; + this.queue = queue; + this.active = active; + this.rejected = rejected; + this.largest = largest; + this.completed = completed; + } + + public Stats(CodedInputStream in) throws IOException { + name = in.readString(); + threads = in.readInt32(); + queue = in.readInt32(); + active = in.readInt32(); + rejected = in.readInt64(); + largest = in.readInt32(); + completed = in.readInt64(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeStringNoTag(name); + out.writeInt32NoTag(threads); + out.writeInt32NoTag(queue); + out.writeInt32NoTag(active); + out.writeInt64NoTag(rejected); + out.writeInt32NoTag(largest); + out.writeInt64NoTag(completed); + } + + public String getName() { + return this.name; + } + + public int getThreads() { + return this.threads; + } + + public int getQueue() { + return this.queue; + } + + public int getActive() { + return this.active; + } + + public long getRejected() { + return rejected; + } + + public int getLargest() { + return largest; + } + + public long getCompleted() { + return this.completed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(name); + if (threads != -1) { + builder.field(Fields.THREADS, threads); + } + if (queue != -1) { + builder.field(Fields.QUEUE, queue); + } + if (active != -1) { + builder.field(Fields.ACTIVE, active); + } + if (rejected != -1) { + builder.field(Fields.REJECTED, rejected); + } + if (largest != -1) { + builder.field(Fields.LARGEST, largest); + } + if (completed != -1) { + builder.field(Fields.COMPLETED, completed); + } + builder.endObject(); + return builder; + } + + @Override + public int compareTo(Stats other) { + if ((getName() == null) && (other.getName() == null)) { + return 0; + } else if ((getName() != null) && (other.getName() == null)) { + return 1; + } else if (getName() == null) { + return -1; + } else { + int compare = getName().compareTo(other.getName()); + if (compare == 0) { + compare = Integer.compare(getThreads(), other.getThreads()); + } + return compare; + } + } + } + + private List stats; + + public ProtobufThreadPoolStats(List stats) { + Collections.sort(stats); + this.stats = stats; + } + + public 
ProtobufThreadPoolStats(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + stats = protobufStreamInput.readList(Stats::new); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeCollection(stats, (o, v) -> v.writeTo(o)); + } + + @Override + public Iterator iterator() { + return stats.iterator(); + } + + static final class Fields { + static final String THREAD_POOL = "thread_pool"; + static final String THREADS = "threads"; + static final String QUEUE = "queue"; + static final String ACTIVE = "active"; + static final String REJECTED = "rejected"; + static final String LARGEST = "largest"; + static final String COMPLETED = "completed"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(Fields.THREAD_POOL); + for (Stats stat : stats) { + stat.toXContent(builder, params); + } + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufActionTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufActionTransportException.java new file mode 100644 index 0000000000000..495bd02f4b070 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufActionTransportException.java @@ -0,0 +1,88 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.transport.ProtobufTransportAddress; + +import java.io.IOException; + +/** + * An action invocation failure. +* +* @opensearch.internal +*/ +public class ProtobufActionTransportException extends ProtobufTransportException { + + private final ProtobufTransportAddress address; + + private final String action; + + public ProtobufActionTransportException(CodedInputStream in) throws IOException { + super(in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + address = protobufStreamInput.readOptionalWriteable(ProtobufTransportAddress::new); + action = protobufStreamInput.readOptionalString(); + } + + public ProtobufActionTransportException(String name, ProtobufTransportAddress address, String action, Throwable cause) { + super(buildMessage(name, address, action, null), cause); + this.address = address; + this.action = action; + } + + public ProtobufActionTransportException(String name, ProtobufTransportAddress address, String action, String msg, Throwable cause) { + super(buildMessage(name, address, action, msg), cause); + this.address = address; + this.action = action; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeOptionalWriteable(address); + protobufStreamOutput.writeOptionalString(action); + } + + /** + * The target address to invoke the action on. + */ + public ProtobufTransportAddress address() { + return address; + } + + /** + * The action to invoke. 
+ */ + public String action() { + return action; + } + + private static String buildMessage(String name, ProtobufTransportAddress address, String action, String msg) { + StringBuilder sb = new StringBuilder(); + if (name != null) { + sb.append('[').append(name).append(']'); + } + if (address != null) { + sb.append('[').append(address).append(']'); + } + if (action != null) { + sb.append('[').append(action).append(']'); + } + if (msg != null) { + sb.append(" ").append(msg); + } + return sb.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufBytesTransportRequest.java b/server/src/main/java/org/opensearch/transport/ProtobufBytesTransportRequest.java new file mode 100644 index 0000000000000..ff684a7909915 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufBytesTransportRequest.java @@ -0,0 +1,66 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.Version; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; + +import java.io.IOException; + +/** + * A specialized, bytes-only request that can potentially be optimized on the network +* layer, specifically for the same large buffer sent to several nodes. +* +* @opensearch.internal +*/ +public class ProtobufBytesTransportRequest extends ProtobufTransportRequest { + + BytesReference bytes; + Version version; + + public ProtobufBytesTransportRequest(CodedInputStream in) throws IOException { + super(in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + bytes = protobufStreamInput.readBytesReference(); + version = protobufStreamInput.getVersion(); + } + + public ProtobufBytesTransportRequest(BytesReference bytes, Version version) { + this.bytes = bytes; + this.version = version; + } + + public Version version() { + return this.version; + } + + public BytesReference bytes() { + return this.bytes; + } + + /** + * Writes the data in a "thin" manner, without the actual bytes, assuming + * the actual bytes will be appended right after this content. + */ + public void writeThin(CodedOutputStream out) throws IOException { + super.writeTo(out); + out.writeInt32NoTag(bytes.length()); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + out.writeByteArrayNoTag(BytesReference.toBytes(bytes)); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java new file mode 100644 index 0000000000000..d8a3a7e87c80b --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java @@ -0,0 +1,290 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license.
+*/ + +package org.opensearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.concurrent.ListenableFuture; +import org.opensearch.common.util.concurrent.RunOnce; +import org.opensearch.common.util.io.IOUtils; + +import java.util.Collections; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This class manages node connections within a cluster. The connection is opened by the underlying transport. +* Once the connection is opened, this class manages the connection. This includes closing the connection when +* the connection manager is closed. +* +* @opensearch.internal +*/ +public class ProtobufClusterConnectionManager implements ProtobufConnectionManager { + + private static final Logger logger = LogManager.getLogger(ProtobufClusterConnectionManager.class); + + private final ConcurrentMap connectedNodes = ConcurrentCollections + .newConcurrentMap(); + private final ConcurrentMap> pendingConnections = ConcurrentCollections + .newConcurrentMap(); + private final AbstractRefCounted connectingRefCounter = new AbstractRefCounted("connection manager") { + @Override + protected void closeInternal() { + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } finally { + iterator.remove(); + } + } + closeLatch.countDown(); + } + }; + private final ProtobufTransport transport; + private final ProtobufConnectionProfile defaultProfile; + private final AtomicBoolean closing = new AtomicBoolean(false); + private final CountDownLatch closeLatch = new CountDownLatch(1); + private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); + + public ProtobufClusterConnectionManager(Settings settings, ProtobufTransport transport) { + this(ProtobufConnectionProfile.buildDefaultConnectionProfile(settings), transport); + } + + public ProtobufClusterConnectionManager(ProtobufConnectionProfile connectionProfile, ProtobufTransport transport) { + this.transport = transport; + this.defaultProfile = connectionProfile; + } + + @Override + public void addListener(ProtobufTransportConnectionListener listener) { + this.connectionListener.addListener(listener); + } + + @Override + public void removeListener(ProtobufTransportConnectionListener listener) { + this.connectionListener.removeListener(listener); + } + + @Override + public void openConnection( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ActionListener listener + ) { + ProtobufConnectionProfile resolvedProfile = ProtobufConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile); + internalOpenConnection(node, resolvedProfile, listener); + } + + /** + * Connects to a node with the given connection profile. If the node is already connected this method has no effect. 
+ * Once a connection is successfully established, it can be validated before being exposed. + * The ActionListener will be called on the calling thread or the generic thread pool. + */ + @Override + public void connectToNode( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ConnectionValidator connectionValidator, + ActionListener listener + ) throws ProtobufConnectTransportException { + ProtobufConnectionProfile resolvedProfile = ProtobufConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile); + if (node == null) { + listener.onFailure(new ProtobufConnectTransportException(null, "can't connect to a null node")); + return; + } + + if (connectingRefCounter.tryIncRef() == false) { + listener.onFailure(new IllegalStateException("connection manager is closed")); + return; + } + + if (connectedNodes.containsKey(node)) { + connectingRefCounter.decRef(); + listener.onResponse(null); + return; + } + + final ListenableFuture currentListener = new ListenableFuture<>(); + final ListenableFuture existingListener = pendingConnections.putIfAbsent(node, currentListener); + if (existingListener != null) { + try { + // wait on previous entry to complete connection attempt + existingListener.addListener(listener, OpenSearchExecutors.newDirectExecutorService()); + } finally { + connectingRefCounter.decRef(); + } + return; + } + + currentListener.addListener(listener, OpenSearchExecutors.newDirectExecutorService()); + + final RunOnce releaseOnce = new RunOnce(connectingRefCounter::decRef); + internalOpenConnection(node, resolvedProfile, ActionListener.wrap(conn -> { + connectionValidator.validate(conn, resolvedProfile, ActionListener.wrap(ignored -> { + assert Transports.assertNotTransportThread("connection validator success"); + try { + if (connectedNodes.putIfAbsent(node, conn) != null) { + logger.debug("existing connection to node [{}], closing new redundant connection", node); + IOUtils.closeWhileHandlingException(conn); + } else { + logger.debug("connected to node [{}]", node); + try { + connectionListener.onNodeConnected(node, conn); + } finally { + final ProtobufTransport.Connection finalConnection = conn; + conn.addCloseListener(ActionListener.wrap(() -> { + logger.trace("unregistering {} after connection close and marking as disconnected", node); + connectedNodes.remove(node, finalConnection); + connectionListener.onNodeDisconnected(node, conn); + })); + } + } + } finally { + ListenableFuture future = pendingConnections.remove(node); + assert future == currentListener : "Listener in pending map is different than the expected listener"; + releaseOnce.run(); + future.onResponse(null); + } + }, e -> { + assert Transports.assertNotTransportThread("connection validator failure"); + IOUtils.closeWhileHandlingException(conn); + failConnectionListeners(node, releaseOnce, e, currentListener); + })); + }, e -> { + assert Transports.assertNotTransportThread("internalOpenConnection failure"); + failConnectionListeners(node, releaseOnce, e, currentListener); + })); + } + + /** + * Returns a connection for the given node if the node is connected. + * Connections returned from this method must not be closed.
The lifecycle of this connection is + maintained by this connection manager. + * + * @throws ProtobufNodeNotConnectedException if the node is not connected + * @see #connectToNode(ProtobufDiscoveryNode, ProtobufConnectionProfile, ConnectionValidator, ActionListener) + */ + @Override + public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node) { + ProtobufTransport.Connection connection = connectedNodes.get(node); + if (connection == null) { + throw new ProtobufNodeNotConnectedException(node, "Node not connected"); + } + return connection; + } + + /** + * Returns {@code true} if the node is connected. + */ + @Override + public boolean nodeConnected(ProtobufDiscoveryNode node) { + return connectedNodes.containsKey(node); + } + + /** + * Disconnects from the given node; if the node is not connected, this does nothing. + */ + @Override + public void disconnectFromNode(ProtobufDiscoveryNode node) { + ProtobufTransport.Connection nodeChannels = connectedNodes.remove(node); + if (nodeChannels != null) { + // if we found it and removed it we close + nodeChannels.close(); + } + } + + /** + * Returns the number of nodes this manager is connected to. + */ + @Override + public int size() { + return connectedNodes.size(); + } + + @Override + public Set getAllConnectedNodes() { + return Collections.unmodifiableSet(connectedNodes.keySet()); + } + + @Override + public void close() { + internalClose(true); + } + + @Override + public void closeNoBlock() { + internalClose(false); + } + + private void internalClose(boolean waitForPendingConnections) { + assert Transports.assertNotTransportThread("Closing ProtobufConnectionManager"); + if (closing.compareAndSet(false, true)) { + connectingRefCounter.decRef(); + if (waitForPendingConnections) { + try { + closeLatch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException(e); + } + } + } + } + + private void internalOpenConnection( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ActionListener listener + ) { + transport.openConnection(node, connectionProfile, ActionListener.map(listener, connection -> { + assert Transports.assertNotTransportThread("internalOpenConnection success"); + try { + connectionListener.onConnectionOpened(connection); + } finally { + connection.addCloseListener(ActionListener.wrap(() -> connectionListener.onConnectionClosed(connection))); + } + if (connection.isClosed()) { + throw new ProtobufConnectTransportException(node, "a channel closed while connecting"); + } + return connection; + })); + } + + private void failConnectionListeners( + ProtobufDiscoveryNode node, + RunOnce releaseOnce, + Exception e, + ListenableFuture expectedListener + ) { + ListenableFuture future = pendingConnections.remove(node); + releaseOnce.run(); + if (future != null) { + assert future == expectedListener : "Listener in pending map is different than the expected listener"; + future.onFailure(e); + } + } + + @Override + public ProtobufConnectionProfile getConnectionProfile() { + return defaultProfile; + } + +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java new file mode 100644 index 0000000000000..ebb7e37b7010b --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java @@ -0,0 +1,61 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions
made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; + +import java.io.IOException; + +/** + * Transport connection exception +* +* @opensearch.internal +*/ +public class ProtobufConnectTransportException extends ProtobufActionTransportException { + + private final ProtobufDiscoveryNode node; + + public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg) { + this(node, msg, null, null); + } + + public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg, String action) { + this(node, msg, action, null); + } + + public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg, Throwable cause) { + this(node, msg, null, cause); + } + + public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg, String action, Throwable cause) { + super(node == null ? null : node.getName(), node == null ? null : node.getAddress(), action, msg, cause); + this.node = node; + } + + public ProtobufConnectTransportException(CodedInputStream in) throws IOException { + super(in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + node = protobufStreamInput.readOptionalWriteable(ProtobufDiscoveryNode::new); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeOptionalWriteable(node); + } + + public ProtobufDiscoveryNode node() { + return node; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java new file mode 100644 index 0000000000000..8e94a43d53d37 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java @@ -0,0 +1,114 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; + +import java.io.Closeable; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * ProtobufTransport connection manager. 
+* +* @opensearch.internal +*/ +public interface ProtobufConnectionManager extends Closeable { + + void addListener(ProtobufTransportConnectionListener listener); + + void removeListener(ProtobufTransportConnectionListener listener); + + void openConnection( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ActionListener listener + ); + + void connectToNode( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ConnectionValidator connectionValidator, + ActionListener listener + ) throws ProtobufConnectTransportException; + + ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node); + + boolean nodeConnected(ProtobufDiscoveryNode node); + + void disconnectFromNode(ProtobufDiscoveryNode node); + + Set getAllConnectedNodes(); + + int size(); + + @Override + void close(); + + void closeNoBlock(); + + ProtobufConnectionProfile getConnectionProfile(); + + /** + * Validates a connection + * + * @opensearch.internal + */ + @FunctionalInterface + interface ConnectionValidator { + void validate(ProtobufTransport.Connection connection, ProtobufConnectionProfile profile, ActionListener listener); + } + + /** + * Connection listener for a delegating node + * + * @opensearch.internal + */ + final class DelegatingNodeConnectionListener implements ProtobufTransportConnectionListener { + + private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); + + @Override + public void onNodeDisconnected(ProtobufDiscoveryNode key, ProtobufTransport.Connection connection) { + for (ProtobufTransportConnectionListener listener : listeners) { + listener.onNodeDisconnected(key, connection); + } + } + + @Override + public void onNodeConnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + for (ProtobufTransportConnectionListener listener : listeners) { + listener.onNodeConnected(node, connection); + } + } + + @Override + public void onConnectionOpened(ProtobufTransport.Connection connection) { + for (ProtobufTransportConnectionListener listener : listeners) { + listener.onConnectionOpened(connection); + } + } + + @Override + public void onConnectionClosed(ProtobufTransport.Connection connection) { + for (ProtobufTransportConnectionListener listener : listeners) { + listener.onConnectionClosed(connection); + } + } + + public void addListener(ProtobufTransportConnectionListener listener) { + listeners.addIfAbsent(listener); + } + + public void removeListener(ProtobufTransportConnectionListener listener) { + listeners.remove(listener); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java b/server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java new file mode 100644 index 0000000000000..a2e10d03bd189 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java @@ -0,0 +1,362 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license.
+*/ + +package org.opensearch.transport; + +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A connection profile describes how many connections are established to a specific node for each of the available request types +* ({@link org.opensearch.transport.TransportRequestOptions.Type}). This allows tailoring a connection towards a specific usage. +* +* @opensearch.internal +*/ +public final class ProtobufConnectionProfile { + + /** + * takes a {@link ProtobufConnectionProfile} and resolves it to a fully specified (i.e., no nulls) profile + */ + public static ProtobufConnectionProfile resolveConnectionProfile( + @Nullable ProtobufConnectionProfile profile, + ProtobufConnectionProfile fallbackProfile + ) { + Objects.requireNonNull(fallbackProfile); + if (profile == null) { + return fallbackProfile; + } else if (profile.getConnectTimeout() != null + && profile.getHandshakeTimeout() != null + && profile.getPingInterval() != null + && profile.getCompressionEnabled() != null) { + return profile; + } else { + ProtobufConnectionProfile.Builder builder = new ProtobufConnectionProfile.Builder(profile); + if (profile.getConnectTimeout() == null) { + builder.setConnectTimeout(fallbackProfile.getConnectTimeout()); + } + if (profile.getHandshakeTimeout() == null) { + builder.setHandshakeTimeout(fallbackProfile.getHandshakeTimeout()); + } + if (profile.getPingInterval() == null) { + builder.setPingInterval(fallbackProfile.getPingInterval()); + } + if (profile.getCompressionEnabled() == null) { + builder.setCompressionEnabled(fallbackProfile.getCompressionEnabled()); + } + return builder.build(); + } + } + + /** + * Builds a default connection profile based on the provided settings. + * + * @param settings to build the connection profile from + * @return the connection profile + */ + public static ProtobufConnectionProfile buildDefaultConnectionProfile(Settings settings) { + int connectionsPerNodeRecovery = TransportSettings.CONNECTIONS_PER_NODE_RECOVERY.get(settings); + int connectionsPerNodeBulk = TransportSettings.CONNECTIONS_PER_NODE_BULK.get(settings); + int connectionsPerNodeReg = TransportSettings.CONNECTIONS_PER_NODE_REG.get(settings); + int connectionsPerNodeState = TransportSettings.CONNECTIONS_PER_NODE_STATE.get(settings); + int connectionsPerNodePing = TransportSettings.CONNECTIONS_PER_NODE_PING.get(settings); + Builder builder = new Builder(); + builder.setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)); + builder.setPingInterval(TransportSettings.PING_SCHEDULE.get(settings)); + builder.setCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS.get(settings)); + builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); + builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); + // if we are not cluster-manager eligible we don't need a dedicated channel to publish the state + builder.addConnections( + ProtobufDiscoveryNode.isClusterManagerNode(settings) ?
connectionsPerNodeState : 0, + TransportRequestOptions.Type.STATE + ); + // if we are not a data-node we don't need any dedicated channels for recovery + builder.addConnections( + ProtobufDiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, + TransportRequestOptions.Type.RECOVERY + ); + builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); + return builder.build(); + } + + /** + * Builds a connection profile that is dedicated to a single channel type. Use this + * when opening single use connections + */ + public static ProtobufConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType) { + return buildSingleChannelProfile(channelType, null, null, null, null); + } + + /** + * Builds a connection profile that is dedicated to a single channel type. Allows passing connection and + * handshake timeouts and compression settings. + */ + public static ProtobufConnectionProfile buildSingleChannelProfile( + TransportRequestOptions.Type channelType, + @Nullable TimeValue connectTimeout, + @Nullable TimeValue handshakeTimeout, + @Nullable TimeValue pingInterval, + @Nullable Boolean compressionEnabled + ) { + Builder builder = new Builder(); + builder.addConnections(1, channelType); + final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); + otherTypes.remove(channelType); + builder.addConnections(0, otherTypes.toArray(new TransportRequestOptions.Type[0])); + if (connectTimeout != null) { + builder.setConnectTimeout(connectTimeout); + } + if (handshakeTimeout != null) { + builder.setHandshakeTimeout(handshakeTimeout); + } + if (pingInterval != null) { + builder.setPingInterval(pingInterval); + } + if (compressionEnabled != null) { + builder.setCompressionEnabled(compressionEnabled); + } + return builder.build(); + } + + private final List handles; + private final int numConnections; + private final TimeValue connectTimeout; + private final TimeValue handshakeTimeout; + private final TimeValue pingInterval; + private final Boolean compressionEnabled; + + private ProtobufConnectionProfile( + List handles, + int numConnections, + TimeValue connectTimeout, + TimeValue handshakeTimeout, + TimeValue pingInterval, + Boolean compressionEnabled + ) { + this.handles = handles; + this.numConnections = numConnections; + this.connectTimeout = connectTimeout; + this.handshakeTimeout = handshakeTimeout; + this.pingInterval = pingInterval; + this.compressionEnabled = compressionEnabled; + } + + /** + * A builder to build a new {@link ProtobufConnectionProfile} + * + * @opensearch.internal + */ + public static class Builder { + private final List handles = new ArrayList<>(); + private final Set addedTypes = EnumSet.noneOf(TransportRequestOptions.Type.class); + private int numConnections = 0; + private TimeValue connectTimeout; + private TimeValue handshakeTimeout; + private Boolean compressionEnabled; + private TimeValue pingInterval; + + /** create an empty builder */ + public Builder() {} + + /** copy constructor, using another profile as a base */ + public Builder(ProtobufConnectionProfile source) { + handles.addAll(source.getHandles()); + numConnections = source.getNumConnections(); + handles.forEach(th -> addedTypes.addAll(th.types)); + connectTimeout = source.getConnectTimeout(); + handshakeTimeout = source.getHandshakeTimeout(); + compressionEnabled = source.getCompressionEnabled(); + pingInterval = source.getPingInterval(); + } + + /** + * Sets a connect timeout for this connection profile + */ + public Builder 
setConnectTimeout(TimeValue connectTimeout) { + if (connectTimeout.millis() < 0) { + throw new IllegalArgumentException("connectTimeout must be non-negative but was: " + connectTimeout); + } + this.connectTimeout = connectTimeout; + return this; + } + + /** + * Sets a handshake timeout for this connection profile + */ + public Builder setHandshakeTimeout(TimeValue handshakeTimeout) { + if (handshakeTimeout.millis() < 0) { + throw new IllegalArgumentException("handshakeTimeout must be non-negative but was: " + handshakeTimeout); + } + this.handshakeTimeout = handshakeTimeout; + return this; + } + + /** + * Sets a ping interval for this connection profile + */ + public Builder setPingInterval(TimeValue pingInterval) { + this.pingInterval = pingInterval; + return this; + } + + /** + * Sets compression enabled for this connection profile + */ + public Builder setCompressionEnabled(boolean compressionEnabled) { + this.compressionEnabled = compressionEnabled; + return this; + } + + /** + * Adds a number of connections for one or more types. Each type can only be added once. + * @param numConnections the number of connections to use in the pool for the given connection types + * @param types a set of types that should share the given number of connections + */ + public Builder addConnections(int numConnections, TransportRequestOptions.Type... types) { + if (types == null || types.length == 0) { + throw new IllegalArgumentException("types must not be null"); + } + for (TransportRequestOptions.Type type : types) { + if (addedTypes.contains(type)) { + throw new IllegalArgumentException("type [" + type + "] is already registered"); + } + } + addedTypes.addAll(Arrays.asList(types)); + handles.add(new ConnectionTypeHandle(this.numConnections, numConnections, EnumSet.copyOf(Arrays.asList(types)))); + this.numConnections += numConnections; + return this; + } + + /** + * Creates a new {@link ProtobufConnectionProfile} based on the added connections. + * @throws IllegalStateException if any of the {@link org.opensearch.transport.TransportRequestOptions.Type} enum is missing + */ + public ProtobufConnectionProfile build() { + EnumSet types = EnumSet.allOf(TransportRequestOptions.Type.class); + types.removeAll(addedTypes); + if (types.isEmpty() == false) { + throw new IllegalStateException("not all types are added for this connection profile - missing types: " + types); + } + return new ProtobufConnectionProfile( + Collections.unmodifiableList(handles), + numConnections, + connectTimeout, + handshakeTimeout, + pingInterval, + compressionEnabled + ); + } + + } + + /** + * Returns the connect timeout or null if no explicit timeout is set on this profile. + */ + public TimeValue getConnectTimeout() { + return connectTimeout; + } + + /** + * Returns the handshake timeout or null if no explicit timeout is set on this profile. + */ + public TimeValue getHandshakeTimeout() { + return handshakeTimeout; + } + + /** + * Returns the ping interval or null if no explicit ping interval is set on this profile. + */ + public TimeValue getPingInterval() { + return pingInterval; + } + + /** + * Returns boolean indicating if compression is enabled or null if no explicit compression + * is set on this profile. + */ + public Boolean getCompressionEnabled() { + return compressionEnabled; + } + + /** + * Returns the total number of connections for this profile + */ + public int getNumConnections() { + return numConnections; + } + + /** + * Returns the number of connections per type for this profile. 
This might return a count that is shared with other types such + * that the sum of all connections per type might be higher than {@link #getNumConnections()}. For instance if + * {@link org.opensearch.transport.TransportRequestOptions.Type#BULK} shares connections with + * {@link org.opensearch.transport.TransportRequestOptions.Type#REG} they will both return the same number of connections from + * this method but the connections are not distinct. + */ + public int getNumConnectionsPerType(TransportRequestOptions.Type type) { + for (ConnectionTypeHandle handle : handles) { + if (handle.getTypes().contains(type)) { + return handle.length; + } + } + throw new AssertionError("no handle found for type: " + type); + } + + /** + * Returns the type handles for this connection profile + */ + List getHandles() { + return Collections.unmodifiableList(handles); + } + + /** + * Connection type handle encapsulates the logic for selecting which connection to use + */ + static final class ConnectionTypeHandle { + public final int length; + public final int offset; + private final Set types; + private final AtomicInteger counter = new AtomicInteger(); + + private ConnectionTypeHandle(int offset, int length, Set types) { + this.length = length; + this.offset = offset; + this.types = types; + } + + /** + * Returns one of the channels configured for this handle. The channel is selected in a round-robin + * fashion. + */ + T getChannel(List channels) { + if (length == 0) { + throw new IllegalStateException("can't select channel size is 0 for types: " + types); + } + assert channels.size() >= offset + length : "illegal size: " + channels.size() + " expected >= " + (offset + length); + return channels.get(offset + Math.floorMod(counter.incrementAndGet(), length)); + } + + /** + * Returns all types for this handle + */ + Set getTypes() { + return types; + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java new file mode 100644 index 0000000000000..53286266bb015 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java @@ -0,0 +1,46 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license.
+*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.threadpool.ThreadPool; + +/** + * Handler for empty transport response +* +* @opensearch.internal +*/ +public class ProtobufEmptyTransportResponseHandler implements ProtobufTransportResponseHandler { + + public static final ProtobufEmptyTransportResponseHandler INSTANCE_SAME = new ProtobufEmptyTransportResponseHandler( + ThreadPool.Names.SAME + ); + + private final String executor; + + public ProtobufEmptyTransportResponseHandler(String executor) { + this.executor = executor; + } + + @Override + public ProtobufTransportResponse.Empty read(CodedInputStream in) { + return ProtobufTransportResponse.Empty.INSTANCE; + } + + @Override + public void handleResponse(ProtobufTransportResponse.Empty response) {} + + @Override + public void handleException(ProtobufTransportException exp) {} + + @Override + public String executor() { + return executor; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java b/server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java index 89fa327ecc0dd..489e7a42f767f 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufNetworkMessage.java @@ -26,7 +26,7 @@ public abstract class ProtobufNetworkMessage { protected final byte status; ProtobufNetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId) { - this.threadContext = (ProtobufWriteable) threadContext.captureAsWriteable(); + this.threadContext = (ProtobufWriteable) threadContext.captureAsProtobufWriteable(); this.version = version; this.requestId = requestId; this.status = status; diff --git a/server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java b/server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java new file mode 100644 index 0000000000000..7a46892128218 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java @@ -0,0 +1,37 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; + +import java.io.IOException; + +/** + * Exception thrown if a node disconnects +* +* @opensearch.internal +*/ +public class ProtobufNodeDisconnectedException extends ProtobufConnectTransportException { + + public ProtobufNodeDisconnectedException(ProtobufDiscoveryNode node, String action) { + super(node, "disconnected", action, null); + } + + public ProtobufNodeDisconnectedException(CodedInputStream in) throws IOException { + super(in); + } + + // stack trace is meaningless... 
+ + @Override + public Throwable fillInStackTrace() { + return this; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java b/server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java new file mode 100644 index 0000000000000..9b90f26324fb3 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java @@ -0,0 +1,30 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; + +import java.io.IOException; + +/** + * An exception indicating that a message is sent to a node that is not connected. +* +* @opensearch.internal +*/ +public class ProtobufNodeNotConnectedException extends ProtobufConnectTransportException { + + public ProtobufNodeNotConnectedException(ProtobufDiscoveryNode node, String msg) { + super(node, msg, (String) null); + } + + public ProtobufNodeNotConnectedException(CodedInputStream in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java new file mode 100644 index 0000000000000..e18057f7818df --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java @@ -0,0 +1,281 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.transport;
+
+import com.google.protobuf.CodedOutputStream;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.Version;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.NotifyOnceListener;
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.common.CheckedSupplier;
+import org.opensearch.common.bytes.BytesReference;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.lease.Releasables;
+import org.opensearch.common.network.CloseableChannel;
+import org.opensearch.common.transport.NetworkExceptionHelper;
+import org.opensearch.common.transport.ProtobufTransportAddress;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.threadpool.ProtobufThreadPool;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * Outbound data handler
+*
+* @opensearch.internal
+*/
+final class ProtobufOutboundHandler {
+
+    private static final Logger logger = LogManager.getLogger(ProtobufOutboundHandler.class);
+
+    private final String nodeName;
+    private final Version version;
+    private final String[] features;
+    private final StatsTracker statsTracker;
+    private final ProtobufThreadPool threadPool;
+    // private final BigArrays bigArrays;
+    private final byte[] bytes;
+    private volatile ProtobufTransportMessageListener messageListener = ProtobufTransportMessageListener.NOOP_LISTENER;
+
+    ProtobufOutboundHandler(
+        String nodeName,
+        Version version,
+        String[] features,
+        StatsTracker statsTracker,
+        ProtobufThreadPool threadPool,
+        byte[] bytes
+    ) {
+        this.nodeName = nodeName;
+        this.version = version;
+        this.features = features;
+        this.statsTracker = statsTracker;
+        this.threadPool = threadPool;
+        this.bytes = bytes;
+    }
+
+    void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener<Void> listener) {
+        SendContext sendContext = new SendContext(channel, () -> bytes, listener);
+        try {
+            internalSend(channel, sendContext);
+        } catch (IOException e) {
+            // This should not happen as the bytes are already serialized
+            throw new AssertionError(e);
+        }
+    }
+
+    /**
+     * Sends the request to the given channel. This method should be used to send {@link ProtobufTransportRequest}
+     * objects to the destination node.
+     */
+    void sendRequest(
+        final ProtobufDiscoveryNode node,
+        final TcpChannel channel,
+        final long requestId,
+        final String action,
+        final ProtobufTransportRequest request,
+        final TransportRequestOptions options,
+        final Version channelVersion,
+        final boolean compressRequest,
+        final boolean isHandshake
+    ) throws IOException, TransportException {
+        Version version = Version.min(this.version, channelVersion);
+        ProtobufOutboundMessage.Request message = new ProtobufOutboundMessage.Request(
+            threadPool.getThreadContext(),
+            features,
+            request,
+            version,
+            action,
+            requestId,
+            isHandshake,
+            compressRequest
+        );
+        ActionListener<Void> listener = ActionListener.wrap(() -> messageListener.onRequestSent(node, requestId, action, request, options));
+        sendMessage(channel, message, listener);
+    }
+
+    /**
+     * Sends the response to the given channel. This method should be used to send {@link ProtobufTransportResponse}
+     * objects back to the caller.
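+     * <p>
+     * For illustration only (all argument values are hypothetical), a call site could look like:
+     * <pre>
+     * handler.sendResponse(Version.CURRENT, features, channel, requestId, action, response, false, false);
+     * </pre>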
+ * + * @see #sendErrorResponse(Version, Set, TcpChannel, long, String, Exception) for sending error responses + */ + void sendResponse( + final Version nodeVersion, + final Set features, + final TcpChannel channel, + final long requestId, + final String action, + final ProtobufTransportResponse response, + final boolean compress, + final boolean isHandshake + ) throws IOException { + Version version = Version.min(this.version, nodeVersion); + ProtobufOutboundMessage.Response message = new ProtobufOutboundMessage.Response( + threadPool.getThreadContext(), + features, + response, + version, + requestId, + isHandshake, + compress + ); + ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, response)); + sendMessage(channel, message, listener); + } + + /** + * Sends back an error response to the caller via the given channel + */ + void sendErrorResponse( + final Version nodeVersion, + final Set features, + final TcpChannel channel, + final long requestId, + final String action, + final Exception error + ) throws IOException { + Version version = Version.min(this.version, nodeVersion); + ProtobufTransportAddress address = new ProtobufTransportAddress(channel.getLocalAddress()); + ProtobufRemoteTransportException tx = new ProtobufRemoteTransportException(nodeName, address, action, error); + ProtobufOutboundMessage.Response message = new ProtobufOutboundMessage.Response( + threadPool.getThreadContext(), + features, + tx, + version, + requestId, + false, + false + ); + ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, error)); + sendMessage(channel, message, listener); + } + + private void sendMessage(TcpChannel channel, ProtobufOutboundMessage networkMessage, ActionListener listener) throws IOException { + MessageSerializer serializer = new MessageSerializer(networkMessage, bytes); + SendContext sendContext = new SendContext(channel, serializer, listener, serializer); + internalSend(channel, sendContext); + } + + private void internalSend(TcpChannel channel, SendContext sendContext) throws IOException { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + BytesReference reference = sendContext.get(); + // stash thread context so that channel event loop is not polluted by thread context + try (ThreadContext.StoredContext existing = threadPool.getThreadContext().stashContext()) { + channel.sendMessage(reference, sendContext); + } catch (RuntimeException ex) { + sendContext.onFailure(ex); + CloseableChannel.closeChannel(channel); + throw ex; + } + } + + void setMessageListener(ProtobufTransportMessageListener listener) { + if (messageListener == ProtobufTransportMessageListener.NOOP_LISTENER) { + messageListener = listener; + } else { + throw new IllegalStateException("Cannot set message listener twice"); + } + } + + /** + * Internal message serializer + * + * @opensearch.internal + */ + private static class MessageSerializer implements CheckedSupplier, Releasable { + + private final ProtobufOutboundMessage message; + private byte[] bytes; + // private final BigArrays bigArrays; + // private volatile ReleasableBytesStreamOutput bytesStreamOutput; + + private MessageSerializer(ProtobufOutboundMessage message, byte[] bytes) { + this.message = message; + this.bytes = bytes; + } + + @Override + public BytesReference get() throws IOException { + CodedOutputStream out = CodedOutputStream.newInstance(bytes); + return message.serialize(out, bytes); + } + + @Override + public void 
close() { + // IOUtils.closeWhileHandlingException(bytesStreamOutput); + } + } + + private class SendContext extends NotifyOnceListener implements CheckedSupplier { + + private final TcpChannel channel; + private final CheckedSupplier messageSupplier; + private final ActionListener listener; + private final Releasable optionalReleasable; + private long messageSize = -1; + + private SendContext( + TcpChannel channel, + CheckedSupplier messageSupplier, + ActionListener listener + ) { + this(channel, messageSupplier, listener, null); + } + + private SendContext( + TcpChannel channel, + CheckedSupplier messageSupplier, + ActionListener listener, + Releasable optionalReleasable + ) { + this.channel = channel; + this.messageSupplier = messageSupplier; + this.listener = listener; + this.optionalReleasable = optionalReleasable; + } + + public BytesReference get() throws IOException { + BytesReference message; + try { + message = messageSupplier.get(); + messageSize = message.length(); + TransportLogger.logOutboundMessage(channel, message); + return message; + } catch (Exception e) { + onFailure(e); + throw e; + } + } + + @Override + protected void innerOnResponse(Void v) { + assert messageSize != -1 : "If onResponse is being called, the message should have been serialized"; + statsTracker.markBytesWritten(messageSize); + closeAndCallback(() -> listener.onResponse(v)); + } + + @Override + protected void innerOnFailure(Exception e) { + if (NetworkExceptionHelper.isCloseConnectionException(e)) { + logger.debug(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); + } else { + logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); + } + closeAndCallback(() -> listener.onFailure(e)); + } + + private void closeAndCallback(Runnable runnable) { + Releasables.close(optionalReleasable, runnable::run); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java b/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java index 0bfc14e93d7da..a2f5fb571a1f8 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java @@ -12,14 +12,12 @@ import org.opensearch.Version; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.bytes.CompositeBytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.ProtobufStreamOutput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.common.util.concurrent.ThreadContext; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.Set; /** @@ -36,31 +34,35 @@ abstract class ProtobufOutboundMessage extends ProtobufNetworkMessage { this.message = message; } - BytesReference serialize(CodedOutputStream bytesStream) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(); + BytesReference serialize(CodedOutputStream out, byte[] bytes) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); protobufStreamOutput.setVersion(version); // bytesStream.skip(TcpHeader.headerSize(version)); // The compressible bytes stream will not close the underlying bytes stream BytesReference reference; int variableHeaderLength = -1; - final long preHeaderPosition = 
bytesStream.position(); - writeVariableHeader(bytesStream); - variableHeaderLength = Math.toIntExact(bytesStream.position() - preHeaderPosition); + // final long preHeaderPosition = out.position(); + writeVariableHeader(out); + // variableHeaderLength = Math.toIntExact(out.position() - preHeaderPosition); - try (CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bytesStream, TransportStatus.isCompress(status))) { - stream.setVersion(version); - stream.setFeatures(bytesStream.getFeatures()); - - if (variableHeaderLength == -1) { - writeVariableHeader(stream); - } - reference = writeMessage(stream); + if (TransportStatus.isCompress(status)) { + protobufStreamOutput.setVersion(version); + protobufStreamOutput.setFeatures(protobufStreamOutput.getFeatures()); } + // try (CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(out, TransportStatus.isCompress(status))) { + // stream.setVersion(version); + // stream.setFeatures(out.getFeatures()); + + // if (variableHeaderLength == -1) { + // writeVariableHeader(stream); + // } + reference = writeMessage(out, bytes); + // } - bytesStream.seek(0); + // out.seek(0); final int contentSize = reference.length() - TcpHeader.headerSize(version); - TcpHeader.writeHeader(bytesStream, requestId, status, version, contentSize, variableHeaderLength); + TcpHeader.writeHeaderProtobuf(out, requestId, status, version, contentSize, variableHeaderLength); return reference; } @@ -68,14 +70,14 @@ protected void writeVariableHeader(CodedOutputStream stream) throws IOException threadContext.writeTo(stream); } - protected BytesReference writeMessage(CompressibleBytesOutputStream stream) throws IOException { + protected BytesReference writeMessage(CodedOutputStream stream, byte[] bytes) throws IOException { final BytesReference zeroCopyBuffer; - if (message instanceof BytesTransportRequest) { - BytesTransportRequest bRequest = (BytesTransportRequest) message; + if (message instanceof ProtobufBytesTransportRequest) { + ProtobufBytesTransportRequest bRequest = (ProtobufBytesTransportRequest) message; bRequest.writeThin(stream); zeroCopyBuffer = bRequest.bytes; - } else if (message instanceof RemoteTransportException) { - stream.writeException((RemoteTransportException) message); + } else if (message instanceof ProtobufRemoteTransportException) { + // stream.writeStringNoTag((ProtobufRemoteTransportException) message.toString()); zeroCopyBuffer = BytesArray.EMPTY; } else { message.writeTo(stream); @@ -86,12 +88,14 @@ protected BytesReference writeMessage(CompressibleBytesOutputStream stream) thro // are written. Otherwise we barf on the decompressing end when we read past EOF on purpose in the // #validateRequest method. this might be a problem in deflate after all but it's important to write // the marker bytes. 
- final BytesReference message = stream.materializeBytes(); - if (zeroCopyBuffer.length() == 0) { - return message; - } else { - return CompositeBytesReference.of(message, zeroCopyBuffer); - } + ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); + final BytesReference message = BytesReference.fromByteBuffer(byteBuffer); + // if (zeroCopyBuffer.length() == 0) { + // return message; + // } else { + // return CompositeBytesReference.of(message, zeroCopyBuffer); + // } + return message; } /** @@ -120,10 +124,11 @@ static class Request extends ProtobufOutboundMessage { } @Override - protected void writeVariableHeader(StreamOutput stream) throws IOException { - super.writeVariableHeader(stream); - stream.writeStringArray(features); - stream.writeString(action); + protected void writeVariableHeader(CodedOutputStream out) throws IOException { + super.writeVariableHeader(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeStringArray(features); + out.writeStringNoTag(action); } private static byte setStatus(boolean compress, boolean isHandshake, ProtobufWriteable message) { @@ -163,9 +168,10 @@ static class Response extends ProtobufOutboundMessage { } @Override - protected void writeVariableHeader(StreamOutput stream) throws IOException { - super.writeVariableHeader(stream); - stream.setFeatures(features); + protected void writeVariableHeader(CodedOutputStream out) throws IOException { + super.writeVariableHeader(out); + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.setFeatures(features); } private static byte setStatus(boolean compress, boolean isHandshake, ProtobufWriteable message) { @@ -186,6 +192,6 @@ private static byte setStatus(boolean compress, boolean isHandshake, ProtobufWri } private static boolean canCompress(ProtobufWriteable message) { - return message instanceof BytesTransportRequest == false; + return message instanceof ProtobufBytesTransportRequest == false; } } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufPlainTransportFuture.java b/server/src/main/java/org/opensearch/transport/ProtobufPlainTransportFuture.java new file mode 100644 index 0000000000000..259840e96126e --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufPlainTransportFuture.java @@ -0,0 +1,103 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.transport;
+
+import com.google.protobuf.CodedInputStream;
+import org.opensearch.OpenSearchException;
+import org.opensearch.OpenSearchTimeoutException;
+import org.opensearch.common.util.concurrent.BaseFuture;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * A future that blocks until a transport response is received
+*
+* @opensearch.internal
+*/
+public class ProtobufPlainTransportFuture<V extends ProtobufTransportResponse> extends BaseFuture<V>
+    implements
+        TransportFuture<V>,
+        ProtobufTransportResponseHandler<V> {
+
+    private final ProtobufTransportResponseHandler<V> handler;
+
+    public ProtobufPlainTransportFuture(ProtobufTransportResponseHandler<V> handler) {
+        this.handler = handler;
+    }
+
+    @Override
+    public V txGet() {
+        try {
+            return get();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new IllegalStateException("Future got interrupted", e);
+        } catch (ExecutionException e) {
+            if (e.getCause() instanceof OpenSearchException) {
+                throw (OpenSearchException) e.getCause();
+            } else {
+                throw new ProtobufTransportException("Failed execution", e);
+            }
+        }
+    }
+
+    @Override
+    public V txGet(long timeout, TimeUnit unit) {
+        try {
+            return get(timeout, unit);
+        } catch (TimeoutException e) {
+            throw new OpenSearchTimeoutException(e);
+        } catch (InterruptedException e) {
+            throw new IllegalStateException("Future got interrupted", e);
+        } catch (ExecutionException e) {
+            if (e.getCause() instanceof OpenSearchException) {
+                throw (OpenSearchException) e.getCause();
+            } else {
+                throw new ProtobufTransportException("Failed execution", e);
+            }
+        }
+    }
+
+    @Override
+    public V read(CodedInputStream in) throws IOException {
+        return handler.read(in);
+    }
+
+    @Override
+    public String executor() {
+        return handler.executor();
+    }
+
+    @Override
+    public void handleResponse(V response) {
+        try {
+            handler.handleResponse(response);
+            set(response);
+        } catch (Exception e) {
+            handleException(new ProtobufTransportException(e));
+        }
+    }
+
+    @Override
+    public void handleException(ProtobufTransportException exp) {
+        try {
+            handler.handleException(exp);
+        } finally {
+            setException(exp);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return "future(" + handler.toString() + ")";
+    }
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java
new file mode 100644
index 0000000000000..52fa5f55ad1ff
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java
@@ -0,0 +1,414 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static org.opensearch.common.settings.Setting.intSetting; + +/** + * Connect through a proxy +* +* @opensearch.internal +*/ +public class ProtobufProxyConnectionStrategy extends ProtobufRemoteConnectionStrategy { + + /** + * The remote address for the proxy. The connections will be opened to the configured address. + */ + public static final Setting.AffixSetting PROXY_ADDRESS = Setting.affixKeySetting( + "cluster.remote.", + "proxy_address", + (ns, key) -> Setting.simpleString(key, new StrategyValidator<>(ns, key, ProtobufConnectionStrategy.PROXY, s -> { + if (Strings.hasLength(s)) { + parsePort(s); + } + }), Setting.Property.Dynamic, Setting.Property.NodeScope) + ); + + /** + * The maximum number of socket connections that will be established to a remote cluster. The default is 18. 
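+     * <p>
+     * For illustration only (the alias {@code my_remote} and the address are hypothetical), a proxy-mode
+     * remote cluster could be configured as:
+     * <pre>
+     * cluster.remote.my_remote.proxy_address: "proxy.example.com:9400"
+     * cluster.remote.my_remote.proxy_socket_connections: 18
+     * </pre>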
+ */ + public static final Setting.AffixSetting REMOTE_SOCKET_CONNECTIONS = Setting.affixKeySetting( + "cluster.remote.", + "proxy_socket_connections", + (ns, key) -> intSetting( + key, + 18, + 1, + new StrategyValidator<>(ns, key, ProtobufConnectionStrategy.PROXY), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ) + ); + + /** + * A configurable server_name attribute + */ + public static final Setting.AffixSetting SERVER_NAME = Setting.affixKeySetting( + "cluster.remote.", + "server_name", + (ns, key) -> Setting.simpleString( + key, + new StrategyValidator<>(ns, key, ProtobufConnectionStrategy.PROXY), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ) + ); + + static final int CHANNELS_PER_CONNECTION = 1; + + private static final int MAX_CONNECT_ATTEMPTS_PER_RUN = 3; + + private final int maxNumConnections; + private final String configuredAddress; + private final String configuredServerName; + private final Supplier address; + private final AtomicReference remoteClusterName = new AtomicReference<>(); + private final ProtobufConnectionManager.ConnectionValidator clusterNameValidator; + + ProtobufProxyConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + Settings settings + ) { + this( + clusterAlias, + transportService, + connectionManager, + settings, + REMOTE_SOCKET_CONNECTIONS.getConcreteSettingForNamespace(clusterAlias).get(settings), + PROXY_ADDRESS.getConcreteSettingForNamespace(clusterAlias).get(settings), + SERVER_NAME.getConcreteSettingForNamespace(clusterAlias).get(settings) + ); + } + + ProtobufProxyConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + Settings settings, + int maxNumConnections, + String configuredAddress + ) { + this( + clusterAlias, + transportService, + connectionManager, + settings, + maxNumConnections, + configuredAddress, + () -> resolveAddress(configuredAddress), + null + ); + } + + ProtobufProxyConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + Settings settings, + int maxNumConnections, + String configuredAddress, + String configuredServerName + ) { + this( + clusterAlias, + transportService, + connectionManager, + settings, + maxNumConnections, + configuredAddress, + () -> resolveAddress(configuredAddress), + configuredServerName + ); + } + + ProtobufProxyConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + Settings settings, + int maxNumConnections, + String configuredAddress, + Supplier address, + String configuredServerName + ) { + super(clusterAlias, transportService, connectionManager, settings); + this.maxNumConnections = maxNumConnections; + this.configuredAddress = configuredAddress; + this.configuredServerName = configuredServerName; + assert Strings.isEmpty(configuredAddress) == false : "Cannot use proxy connection strategy with no configured addresses"; + this.address = address; + this.clusterNameValidator = (newConnection, actualProfile, listener) -> transportService.handshake( + newConnection, + actualProfile.getHandshakeTimeout().millis(), + cn -> true, + ActionListener.map(listener, resp -> { + ProtobufClusterName remote = resp.getClusterName(); + if (remoteClusterName.compareAndSet(null, remote)) { + return null; + } else { + if 
(remoteClusterName.get().equals(remote) == false) { + ProtobufDiscoveryNode node = newConnection.getNode(); + throw new ProtobufConnectTransportException(node, "handshake failed. unexpected remote cluster name " + remote); + } + return null; + } + }) + ); + } + + static Stream> enablementSettings() { + return Stream.of(ProxyConnectionStrategy.PROXY_ADDRESS); + } + + static ProtobufWriteable.Reader infoReader() { + return ProxyModeInfo::new; + } + + @Override + protected boolean shouldOpenMoreConnections() { + return connectionManager.size() < maxNumConnections; + } + + @Override + protected boolean strategyMustBeRebuilt(Settings newSettings) { + String address = PROXY_ADDRESS.getConcreteSettingForNamespace(clusterAlias).get(newSettings); + int numOfSockets = REMOTE_SOCKET_CONNECTIONS.getConcreteSettingForNamespace(clusterAlias).get(newSettings); + String serverName = SERVER_NAME.getConcreteSettingForNamespace(clusterAlias).get(newSettings); + return numOfSockets != maxNumConnections + || configuredAddress.equals(address) == false + || Objects.equals(serverName, configuredServerName) == false; + } + + @Override + protected ProtobufConnectionStrategy strategyType() { + return ProtobufConnectionStrategy.PROXY; + } + + @Override + protected void connectImpl(ActionListener listener) { + performProxyConnectionProcess(listener); + } + + @Override + public ProtobufRemoteConnectionInfo.ModeInfo getModeInfo() { + return new ProxyModeInfo(configuredAddress, configuredServerName, maxNumConnections, connectionManager.size()); + } + + private void performProxyConnectionProcess(ActionListener listener) { + openConnections(listener, 1); + } + + private void openConnections(ActionListener finished, int attemptNumber) { + if (attemptNumber <= MAX_CONNECT_ATTEMPTS_PER_RUN) { + ProtobufTransportAddress resolved = address.get(); + + int remaining = maxNumConnections - connectionManager.size(); + ActionListener compositeListener = new ActionListener() { + + private final AtomicInteger successfulConnections = new AtomicInteger(0); + private final CountDown countDown = new CountDown(remaining); + + @Override + public void onResponse(Void v) { + successfulConnections.incrementAndGet(); + if (countDown.countDown()) { + if (shouldOpenMoreConnections()) { + openConnections(finished, attemptNumber + 1); + } else { + finished.onResponse(v); + } + } + } + + @Override + public void onFailure(Exception e) { + if (countDown.countDown()) { + openConnections(finished, attemptNumber + 1); + } + } + }; + + for (int i = 0; i < remaining; ++i) { + String id = clusterAlias + "#" + resolved; + Map attributes; + if (Strings.isNullOrEmpty(configuredServerName)) { + attributes = Collections.emptyMap(); + } else { + attributes = Collections.singletonMap("server_name", configuredServerName); + } + ProtobufDiscoveryNode node = new ProtobufDiscoveryNode( + id, + resolved, + attributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT.minimumCompatibilityVersion() + ); + + connectionManager.connectToNode(node, null, clusterNameValidator, new ActionListener() { + @Override + public void onResponse(Void v) { + compositeListener.onResponse(v); + } + + @Override + public void onFailure(Exception e) { + logger.debug( + new ParameterizedMessage( + "failed to open remote connection [remote cluster: {}, address: {}]", + clusterAlias, + resolved + ), + e + ); + compositeListener.onFailure(e); + } + }); + } + } else { + int openConnections = connectionManager.size(); + if (openConnections == 0) { + finished.onFailure( + new 
IllegalStateException("Unable to open any proxy connections to remote cluster [" + clusterAlias + "]") + ); + } else { + logger.debug( + "unable to open maximum number of connections [remote cluster: {}, opened: {}, maximum: {}]", + clusterAlias, + openConnections, + maxNumConnections + ); + finished.onResponse(null); + } + } + } + + private static ProtobufTransportAddress resolveAddress(String address) { + return new ProtobufTransportAddress(parseConfiguredAddress(address)); + } + + /** + * Contains information about the proxy mode + * + * @opensearch.internal + */ + public static class ProxyModeInfo implements ProtobufRemoteConnectionInfo.ModeInfo { + + private final String address; + private final String serverName; + private final int maxSocketConnections; + private final int numSocketsConnected; + + public ProxyModeInfo(String address, String serverName, int maxSocketConnections, int numSocketsConnected) { + this.address = address; + this.serverName = serverName; + this.maxSocketConnections = maxSocketConnections; + this.numSocketsConnected = numSocketsConnected; + } + + private ProxyModeInfo(CodedInputStream input) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(input); + address = input.readString(); + serverName = input.readString(); + maxSocketConnections = protobufStreamInput.readVInt(); + numSocketsConnected = protobufStreamInput.readVInt(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("proxy_address", address); + builder.field("server_name", serverName); + builder.field("num_proxy_sockets_connected", numSocketsConnected); + builder.field("max_proxy_socket_connections", maxSocketConnections); + return builder; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeStringNoTag(address); + out.writeStringNoTag(serverName); + out.writeInt32NoTag(maxSocketConnections); + out.writeInt32NoTag(numSocketsConnected); + } + + @Override + public boolean isConnected() { + return numSocketsConnected > 0; + } + + @Override + public String modeName() { + return "proxy"; + } + + public String getAddress() { + return address; + } + + public String getServerName() { + return serverName; + } + + public int getMaxSocketConnections() { + return maxSocketConnections; + } + + public int getNumSocketsConnected() { + return numSocketsConnected; + } + + @Override + public ProtobufRemoteConnectionStrategy.ProtobufConnectionStrategy modeType() { + return ProtobufRemoteConnectionStrategy.ProtobufConnectionStrategy.PROXY; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ProxyModeInfo otherProxy = (ProxyModeInfo) o; + return maxSocketConnections == otherProxy.maxSocketConnections + && numSocketsConnected == otherProxy.numSocketsConnected + && Objects.equals(address, otherProxy.address) + && Objects.equals(serverName, otherProxy.serverName); + } + + @Override + public int hashCode() { + return Objects.hash(address, serverName, maxSocketConnections, numSocketsConnected); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java new file mode 100644 index 0000000000000..28fb5baa28b2b --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java @@ -0,0 
+1,31 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import com.google.protobuf.CodedInputStream;
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+
+import java.io.IOException;
+
+/**
+ * Thrown when a response was not received within the expected timeout
+*
+* @opensearch.internal
+*/
+public class ProtobufReceiveTimeoutTransportException extends ProtobufActionTransportException {
+
+    public ProtobufReceiveTimeoutTransportException(ProtobufDiscoveryNode node, String action, String msg) {
+        super(node.getName(), node.getAddress(), action, msg, null);
+    }
+
+    public ProtobufReceiveTimeoutTransportException(CodedInputStream in) throws IOException {
+        super(in);
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java
new file mode 100644
index 0000000000000..c400597c97d4f
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java
@@ -0,0 +1,78 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import org.opensearch.action.ProtobufActionType;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ProtobufActionListenerResponseHandler;
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.client.ProtobufClient;
+import org.opensearch.client.support.ProtobufAbstractClient;
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.threadpool.ProtobufThreadPool;
+
+/**
+ * ProtobufClient that is aware of remote clusters
+*
+* @opensearch.internal
+*/
+final class ProtobufRemoteClusterAwareClient extends ProtobufAbstractClient {
+
+    private final ProtobufTransportService service;
+    private final String clusterAlias;
+    private final ProtobufRemoteClusterService remoteClusterService;
+
+    ProtobufRemoteClusterAwareClient(
+        Settings settings,
+        ProtobufThreadPool threadPool,
+        ProtobufTransportService service,
+        String clusterAlias
+    ) {
+        super(settings, threadPool);
+        this.service = service;
+        this.clusterAlias = clusterAlias;
+        this.remoteClusterService = service.getRemoteClusterService();
+    }
+
+    @Override
+    protected <Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> void doExecute(
+        ProtobufActionType<Response> action,
+        Request request,
+        ActionListener<Response> listener
+    ) {
+        remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(v -> {
+            ProtobufTransport.Connection connection;
+            if (request instanceof ProtobufRemoteClusterAwareRequest) {
+                ProtobufDiscoveryNode preferredTargetNode = ((ProtobufRemoteClusterAwareRequest) request).getPreferredTargetNode();
+                connection = remoteClusterService.getConnection(preferredTargetNode, clusterAlias);
+            } else {
+                connection = remoteClusterService.getConnection(clusterAlias);
+            }
+            service.sendRequest(
+                connection,
+                action.name(),
+                request,
+                TransportRequestOptions.EMPTY,
+                new ProtobufActionListenerResponseHandler<>(listener, action.getResponseReader())
+            );
+        }, listener::onFailure));
+    }
+
+    @Override
+    public void close() {
+        // do nothing
+    }
+
+    @Override
+    public ProtobufClient 
getRemoteClusterClient(String clusterAlias) {
+        return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java
new file mode 100644
index 0000000000000..80576bd9608bf
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java
@@ -0,0 +1,29 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+
+/**
+ * Request for remote clusters
+*
+* @opensearch.internal
+*/
+public interface ProtobufRemoteClusterAwareRequest {
+
+    /**
+     * Returns the preferred discovery node for this request. The remote cluster client will attempt to send
+     * this request directly to this node. Otherwise, it will send the request as a proxy action that will
+     * be routed by the remote cluster to this node.
+     *
+     * @return preferred discovery node
+     */
+    ProtobufDiscoveryNode getPreferredTargetNode();
+
+}
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java
new file mode 100644
index 0000000000000..58d90365f0ef5
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java
@@ -0,0 +1,229 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import com.google.protobuf.CodedInputStream;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.admin.cluster.state.ClusterStateAction;
+import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest;
+import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse;
+import org.opensearch.action.support.ContextPreservingActionListener;
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.threadpool.ProtobufThreadPool;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.function.Function;
+
+/**
+ * Represents a connection to a single remote cluster. In contrast to a local cluster, a remote cluster is not joined: the
+* current node is not part of the remote cluster and will not receive cluster state updates from it. Remote clusters are also not
+* fully connected with the current node. From a connection perspective a local cluster forms a bi-directional star network while in the
+* remote case we only connect to a subset of the nodes in the cluster in a uni-directional fashion.
+*
+* This class also handles the discovery of nodes from the remote cluster. The initial list of seed nodes is only used to discover all nodes
+* in the remote cluster and to connect to all eligible nodes, for details see {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}.
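+*
+* For example (mirroring the description of {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}), gateway-eligible
+* nodes can be selected with a node attribute:
+* <pre>
+* node.attr.gateway: true              # on remote nodes that may act as gateways
+* cluster.remote.node.attr: gateway    # on the node opening the connection
+* </pre>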
+* +* In the case of a disconnection, this class will issue a re-connect task to establish at most +* {@link SniffConnectionStrategy#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of +* connections per cluster has been reached. +* +* @opensearch.internal +*/ +final class ProtobufRemoteClusterConnection implements Closeable { + + private final ProtobufTransportService transportService; + private final ProtobufRemoteConnectionManager remoteConnectionManager; + private final ProtobufRemoteConnectionStrategy connectionStrategy; + private final String clusterAlias; + private final ProtobufThreadPool threadPool; + private volatile boolean skipUnavailable; + private final TimeValue initialConnectionTimeout; + + /** + * Creates a new {@link ProtobufRemoteClusterConnection} + * @param settings the nodes settings object + * @param clusterAlias the configured alias of the cluster to connect to + * @param transportService the local nodes transport service + */ + ProtobufRemoteClusterConnection(Settings settings, String clusterAlias, ProtobufTransportService transportService) { + this.transportService = transportService; + this.clusterAlias = clusterAlias; + ProtobufConnectionProfile profile = ProtobufRemoteConnectionStrategy.buildConnectionProfile(clusterAlias, settings); + this.remoteConnectionManager = new ProtobufRemoteConnectionManager( + clusterAlias, + createConnectionManager(profile, transportService) + ); + this.connectionStrategy = ProtobufRemoteConnectionStrategy.buildStrategy( + clusterAlias, + transportService, + remoteConnectionManager, + settings + ); + // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. + this.remoteConnectionManager.addListener(transportService); + this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(clusterAlias) + .get(settings); + this.threadPool = transportService.threadPool; + initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); + } + + /** + * Updates the skipUnavailable flag that can be dynamically set for each remote cluster + */ + void updateSkipUnavailable(boolean skipUnavailable) { + this.skipUnavailable = skipUnavailable; + } + + /** + * Returns whether this cluster is configured to be skipped when unavailable + */ + boolean isSkipUnavailable() { + return skipUnavailable; + } + + /** + * Ensures that this cluster is connected. If the cluster is connected this operation + * will invoke the listener immediately. + */ + void ensureConnected(ActionListener listener) { + if (remoteConnectionManager.size() == 0) { + connectionStrategy.connect(listener); + } else { + listener.onResponse(null); + } + } + + /** + * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link ProtobufDiscoveryNode} lookup function + * that returns null if the node ID is not found. + * + * The requests to get cluster state on the connected cluster are made in the system context because logically + * they are equivalent to checking a single detail in the local cluster state and should not require that the + * user who made the request that is using this method in its implementation is authorized to view the entire + * cluster state. 
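+     * <p>
+     * A sketch of the intended use (the surrounding names are illustrative, not part of this API):
+     * <pre>
+     * connection.collectNodes(ActionListener.wrap(
+     *     nodeLookup -> handle(nodeLookup.apply(nodeId)),   // the lookup returns null for unknown node IDs
+     *     e -> logger.warn("failed to collect nodes", e)));
+     * </pre>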
+ */ + void collectNodes(ActionListener> listener) { + Runnable runnable = () -> { + final ThreadContext threadContext = threadPool.getThreadContext(); + final ContextPreservingActionListener> contextPreservingActionListener = + new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any existing context information + threadContext.markAsSystemContext(); + + final ProtobufClusterStateRequest request = new ProtobufClusterStateRequest(); + request.clear(); + request.nodes(true); + request.local(true); // run this on the node that gets the request it's as good as any other + ProtobufTransport.Connection connection = remoteConnectionManager.getAnyRemoteConnection(); + transportService.sendRequest( + connection, + ClusterStateAction.NAME, + request, + TransportRequestOptions.EMPTY, + new ProtobufTransportResponseHandler() { + + @Override + public ProtobufClusterStateResponse read(CodedInputStream in) throws IOException { + return new ProtobufClusterStateResponse(in); + } + + @Override + public void handleResponse(ProtobufClusterStateResponse response) { + ProtobufDiscoveryNodes nodes = response.getState().nodes(); + contextPreservingActionListener.onResponse(nodes::get); + } + + @Override + public void handleException(ProtobufTransportException exp) { + contextPreservingActionListener.onFailure(exp); + } + + @Override + public String executor() { + return ProtobufThreadPool.Names.SAME; + } + } + ); + } + }; + try { + // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener + // this will cause some back pressure on the search end and eventually will cause rejections but that's fine + // we can't proceed with a search on a cluster level. + // in the future we might want to just skip the remote nodes in such a case but that can already be implemented on the + // caller end since they provide the listener. + ensureConnected(ActionListener.wrap((x) -> runnable.run(), listener::onFailure)); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + + /** + * Returns a connection to the remote cluster, preferably a direct connection to the provided {@link ProtobufDiscoveryNode}. + * If such node is not connected, the returned connection will be a proxy connection that redirects to it. + */ + ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode remoteClusterNode) { + return remoteConnectionManager.getConnection(remoteClusterNode); + } + + ProtobufTransport.Connection getConnection() { + return remoteConnectionManager.getAnyRemoteConnection(); + } + + @Override + public void close() throws IOException { + IOUtils.close(connectionStrategy, remoteConnectionManager); + } + + public boolean isClosed() { + return connectionStrategy.isClosed(); + } + + // for testing only + boolean assertNoRunningConnections() { + return connectionStrategy.assertNoRunningConnections(); + } + + boolean isNodeConnected(final ProtobufDiscoveryNode node) { + return remoteConnectionManager.nodeConnected(node); + } + + /** + * Get the information about remote nodes to be rendered on {@code _remote/info} requests. 
+ */ + public ProtobufRemoteConnectionInfo getConnectionInfo() { + return new ProtobufRemoteConnectionInfo(clusterAlias, connectionStrategy.getModeInfo(), initialConnectionTimeout, skipUnavailable); + } + + int getNumNodesConnected() { + return remoteConnectionManager.size(); + } + + private static ProtobufConnectionManager createConnectionManager( + ProtobufConnectionProfile connectionProfile, + ProtobufTransportService transportService + ) { + return new ProtobufClusterConnectionManager(connectionProfile, transportService.transport); + } + + ProtobufConnectionManager getConnectionManager() { + return remoteConnectionManager; + } + + boolean shouldRebuildConnection(Settings newSettings) { + return connectionStrategy.shouldRebuildConnection(newSettings); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java new file mode 100644 index 0000000000000..4148beb90c6e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java @@ -0,0 +1,454 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.OriginalIndices; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.ProtobufClient; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.Strings; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.threadpool.ProtobufThreadPool; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Stream; + +import static org.opensearch.common.settings.Setting.boolSetting; +import static org.opensearch.common.settings.Setting.timeSetting; + +/** + * Basic service for accessing remote clusters via gateway nodes +* +* @opensearch.internal +*/ +public final class ProtobufRemoteClusterService extends RemoteClusterAware implements Closeable { + + private final Logger logger = LogManager.getLogger(ProtobufRemoteClusterService.class); + + /** + * The initial connect timeout for remote cluster connections + */ + public static final Setting REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "cluster.remote.initial_connect_timeout", + TimeValue.timeValueSeconds(30), + 
Setting.Property.NodeScope + ); + + /** + * The name of a node attribute to select nodes that should be connected to in the remote cluster. + * For instance a node can be configured with {@code node.attr.gateway: true} in order to be eligible as a gateway node between + * clusters. In that case {@code cluster.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster. + * The value of the setting is expected to be a boolean, {@code true} for nodes that can become gateways, {@code false} otherwise. + */ + public static final Setting REMOTE_NODE_ATTRIBUTE = Setting.simpleString( + "cluster.remote.node.attr", + Setting.Property.NodeScope + ); + + /** + * If true connecting to remote clusters is supported on this node. If false this node will not establish + * connections to any remote clusters configured. Search requests executed against this node (where this node is the coordinating node) + * will fail if remote cluster syntax is used as an index pattern. The default is true + */ + public static final Setting ENABLE_REMOTE_CLUSTERS = Setting.boolSetting( + "cluster.remote.connect", + true, + Setting.Property.Deprecated, + Setting.Property.NodeScope + ); + + public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( + "cluster.remote.", + "skip_unavailable", + (ns, key) -> boolSetting(key, false, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope) + ); + + public static final Setting.AffixSetting REMOTE_CLUSTER_PING_SCHEDULE = Setting.affixKeySetting( + "cluster.remote.", + "transport.ping_schedule", + (ns, key) -> timeSetting( + key, + TransportSettings.PING_SCHEDULE, + new RemoteConnectionEnabled<>(ns, key), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ) + ); + + public static final Setting.AffixSetting REMOTE_CLUSTER_COMPRESS = Setting.affixKeySetting( + "cluster.remote.", + "transport.compress", + (ns, key) -> boolSetting( + key, + TransportSettings.TRANSPORT_COMPRESS, + new RemoteConnectionEnabled<>(ns, key), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ) + ); + + private final boolean enabled; + + public boolean isEnabled() { + return enabled; + } + + private final ProtobufTransportService transportService; + private final Map remoteClusters = ConcurrentCollections.newConcurrentMap(); + + ProtobufRemoteClusterService(Settings settings, ProtobufTransportService transportService) { + super(settings); + this.enabled = ProtobufDiscoveryNode.isRemoteClusterClient(settings); + this.transportService = transportService; + } + + /** + * Returns true if at least one remote cluster is configured + */ + public boolean isCrossClusterSearchEnabled() { + return remoteClusters.isEmpty() == false; + } + + boolean isRemoteNodeConnected(final String remoteCluster, final ProtobufDiscoveryNode node) { + return remoteClusters.get(remoteCluster).isNodeConnected(node); + } + + public Map groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate indexExists) { + Map originalIndicesMap = new HashMap<>(); + if (isCrossClusterSearchEnabled()) { + final Map> groupedIndices = groupClusterIndices(getRemoteClusterNames(), indices, indexExists); + if (groupedIndices.isEmpty()) { + // search on _all in the local cluster if neither local indices nor remote indices were specified + originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions)); + } else { + for (Map.Entry> entry : groupedIndices.entrySet()) { + String 
clusterAlias = entry.getKey();
+                    List<String> originalIndices = entry.getValue();
+                    originalIndicesMap.put(clusterAlias, new OriginalIndices(originalIndices.toArray(new String[0]), indicesOptions));
+                }
+            }
+        } else {
+            originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(indices, indicesOptions));
+        }
+        return originalIndicesMap;
+    }
+
+    /**
+     * Returns {@code true} iff the given cluster is configured as a remote cluster, otherwise {@code false}.
+     */
+    boolean isRemoteClusterRegistered(String clusterName) {
+        return remoteClusters.containsKey(clusterName);
+    }
+
+    /**
+     * Returns the registered remote cluster names.
+     */
+    public Set<String> getRegisteredRemoteClusterNames() {
+        // remoteClusters is unmodifiable so its key set will be unmodifiable too
+        return remoteClusters.keySet();
+    }
+
+    /**
+     * Returns a connection to the given node on the given remote cluster
+     *
+     * @throws IllegalArgumentException if the remote cluster is unknown
+     */
+    public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node, String cluster) {
+        return getRemoteClusterConnection(cluster).getConnection(node);
+    }
+
+    /**
+     * Ensures that the given cluster alias is connected. If the cluster is connected this operation
+     * will invoke the listener immediately.
+     */
+    void ensureConnected(String clusterAlias, ActionListener<Void> listener) {
+        getRemoteClusterConnection(clusterAlias).ensureConnected(listener);
+    }
+
+    /**
+     * Returns whether the cluster identified by the provided alias is configured to be skipped when unavailable
+     */
+    public boolean isSkipUnavailable(String clusterAlias) {
+        return getRemoteClusterConnection(clusterAlias).isSkipUnavailable();
+    }
+
+    public ProtobufTransport.Connection getConnection(String cluster) {
+        return getRemoteClusterConnection(cluster).getConnection();
+    }
+
+    ProtobufRemoteClusterConnection getRemoteClusterConnection(String cluster) {
+        if (enabled == false) {
+            throw new IllegalArgumentException(
+                "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role"
+            );
+        }
+        ProtobufRemoteClusterConnection connection = remoteClusters.get(cluster);
+        if (connection == null) {
+            throw new NoSuchRemoteClusterException(cluster);
+        }
+        return connection;
+    }
+
+    Set<String> getRemoteClusterNames() {
+        return this.remoteClusters.keySet();
+    }
+
+    @Override
+    public void listenForUpdates(ClusterSettings clusterSettings) {
+        super.listenForUpdates(clusterSettings);
+        clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {});
+    }
+
+    private synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) {
+        ProtobufRemoteClusterConnection remote = this.remoteClusters.get(clusterAlias);
+        if (remote != null) {
+            remote.updateSkipUnavailable(skipUnavailable);
+        }
+    }
+
+    @Override
+    protected void updateRemoteCluster(String clusterAlias, Settings settings) {
+        CountDownLatch latch = new CountDownLatch(1);
+        updateRemoteCluster(clusterAlias, settings, ActionListener.wrap(latch::countDown));
+
+        try {
+            // Wait 10 seconds for connections. We must use a latch instead of a future because we
+            // are on the cluster state thread and our custom future implementation will throw an
+            // assertion.
+ if (latch.await(10, TimeUnit.SECONDS) == false) { + logger.warn("failed to connect to new remote cluster {} within {}", clusterAlias, TimeValue.timeValueSeconds(10)); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + /** + * This method updates the list of remote clusters. It's intended to be used as an update consumer on the settings infrastructure + * + * @param clusterAlias a cluster alias to discovery node mapping representing the remote clusters seeds nodes + * @param newSettings the updated settings for the remote connection + * @param listener a listener invoked once every configured cluster has been connected to + */ + synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, ActionListener listener) { + if (LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); + } + + ProtobufRemoteClusterConnection remote = this.remoteClusters.get(clusterAlias); + if (RemoteConnectionStrategy.isConnectionEnabled(clusterAlias, newSettings) == false) { + try { + IOUtils.close(remote); + } catch (IOException e) { + logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); + } + remoteClusters.remove(clusterAlias); + listener.onResponse(null); + return; + } + + if (remote == null) { + // this is a new cluster we have to add a new representation + Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); + remote = new ProtobufRemoteClusterConnection(finalSettings, clusterAlias, transportService); + remoteClusters.put(clusterAlias, remote); + remote.ensureConnected(listener); + } else if (remote.shouldRebuildConnection(newSettings)) { + // Changes to connection configuration. Must tear down existing connection + try { + IOUtils.close(remote); + } catch (IOException e) { + logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); + } + remoteClusters.remove(clusterAlias); + Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); + remote = new ProtobufRemoteClusterConnection(finalSettings, clusterAlias, transportService); + remoteClusters.put(clusterAlias, remote); + remote.ensureConnected(listener); + } else { + // No changes to connection configuration. + listener.onResponse(null); + } + } + + /** + * Connects to all remote clusters in a blocking fashion. This should be called on node startup to establish an initial connection + * to all configured seed nodes. 
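+     * The blocking wait is bounded by {@code REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING}; if that timeout
+     * elapses before every configured cluster has responded, a warning is logged and startup continues.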
+ */ + void initializeRemoteClusters() { + final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); + final PlainActionFuture> future = new PlainActionFuture<>(); + Set enabledClusters = RemoteClusterAware.getEnabledRemoteClusters(settings); + + if (enabledClusters.isEmpty()) { + return; + } + + GroupedActionListener listener = new GroupedActionListener<>(future, enabledClusters.size()); + for (String clusterAlias : enabledClusters) { + updateRemoteCluster(clusterAlias, settings, listener); + } + + if (enabledClusters.isEmpty()) { + future.onResponse(null); + } + + try { + future.get(timeValue.millis(), TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (TimeoutException ex) { + logger.warn("failed to connect to remote clusters within {}", timeValue.toString()); + } catch (Exception e) { + throw new IllegalStateException("failed to connect to remote clusters", e); + } + } + + @Override + public void close() throws IOException { + IOUtils.close(remoteClusters.values()); + } + + public Stream getRemoteConnectionInfos() { + return remoteClusters.values().stream().map(ProtobufRemoteClusterConnection::getConnectionInfo); + } + + /** + * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link ProtobufDiscoveryNode} + * function on success. + */ + public void collectNodes(Set clusters, ActionListener> listener) { + if (enabled == false) { + throw new IllegalArgumentException( + "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role" + ); + } + Map remoteClusters = this.remoteClusters; + for (String cluster : clusters) { + if (remoteClusters.containsKey(cluster) == false) { + listener.onFailure(new NoSuchRemoteClusterException(cluster)); + return; + } + } + + final Map> clusterMap = new HashMap<>(); + CountDown countDown = new CountDown(clusters.size()); + Function nullFunction = s -> null; + for (final String cluster : clusters) { + ProtobufRemoteClusterConnection connection = remoteClusters.get(cluster); + connection.collectNodes(new ActionListener>() { + @Override + public void onResponse(Function nodeLookup) { + synchronized (clusterMap) { + clusterMap.put(cluster, nodeLookup); + } + if (countDown.countDown()) { + listener.onResponse((clusterAlias, nodeId) -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId)); + } + } + + @Override + public void onFailure(Exception e) { + if (countDown.fastForward()) { // we need to check if it's true since we could have multiple failures + listener.onFailure(e); + } + } + }); + } + } + + /** + * Returns a client to the remote cluster if the given cluster alias exists. 
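+     * A minimal usage sketch (the receiver and the alias {@code "cluster_one"} are illustrative only):
+     * <pre>
+     * ProtobufClient remoteClient = remoteClusterService.getRemoteClusterClient(threadPool, "cluster_one");
+     * </pre>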
+ * + * @param threadPool the {@link ProtobufThreadPool} for the client + * @param clusterAlias the cluster alias the remote cluster is registered under + * @throws IllegalArgumentException if the given clusterAlias doesn't exist + */ + public ProtobufClient getRemoteClusterClient(ProtobufThreadPool threadPool, String clusterAlias) { + if (transportService.getRemoteClusterService().isEnabled() == false) { + throw new IllegalArgumentException( + "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role" + ); + } + if (transportService.getRemoteClusterService().getRemoteClusterNames().contains(clusterAlias) == false) { + throw new NoSuchRemoteClusterException(clusterAlias); + } + return new ProtobufRemoteClusterAwareClient(settings, threadPool, transportService, clusterAlias); + } + + Collection getConnections() { + return remoteClusters.values(); + } + + /** + * Internal class to hold cluster alias and key and track a remote connection + * + * @opensearch.internal + */ + private static class RemoteConnectionEnabled implements Setting.Validator { + + private final String clusterAlias; + private final String key; + + private RemoteConnectionEnabled(String clusterAlias, String key) { + this.clusterAlias = clusterAlias; + this.key = key; + } + + @Override + public void validate(T value) {} + + @Override + public void validate(T value, Map, Object> settings, boolean isPresent) { + if (isPresent && RemoteConnectionStrategy.isConnectionEnabled(clusterAlias, settings) == false) { + throw new IllegalArgumentException("Cannot configure setting [" + key + "] if remote cluster is not enabled."); + } + } + + @Override + public Iterator> settings() { + return Stream.concat( + Stream.of(RemoteConnectionStrategy.REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(clusterAlias)), + settingsStream() + ).iterator(); + } + + private Stream> settingsStream() { + return Arrays.stream(RemoteConnectionStrategy.ConnectionStrategy.values()) + .flatMap(strategy -> strategy.getEnablementSettings().get()) + .map(as -> as.getConcreteSettingForNamespace(clusterAlias)); + } + }; +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionInfo.java new file mode 100644 index 0000000000000..6527c3e0e5814 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionInfo.java @@ -0,0 +1,132 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class encapsulates all remote cluster information to be rendered on +* {@code _remote/info} requests. 
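+*
+* For illustration, a single cluster entry produced by {@code toXContent} has roughly the following shape
+* (field values are examples only; the mode specific fields depend on the configured connection strategy):
+* <pre>
+* "cluster_one" : {
+*   "connected" : true,
+*   "mode" : "sniff",
+*   "seeds" : [ "127.0.0.1:9300" ],
+*   "num_nodes_connected" : 1,
+*   "max_connections_per_cluster" : 3,
+*   "initial_connect_timeout" : "30s",
+*   "skip_unavailable" : false
+* }
+* </pre>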
+* +* @opensearch.internal +*/ +public final class ProtobufRemoteConnectionInfo implements ToXContentFragment, ProtobufWriteable { + + final ModeInfo modeInfo; + final TimeValue initialConnectionTimeout; + final String clusterAlias; + final boolean skipUnavailable; + + public ProtobufRemoteConnectionInfo( + String clusterAlias, + ModeInfo modeInfo, + TimeValue initialConnectionTimeout, + boolean skipUnavailable + ) { + this.clusterAlias = clusterAlias; + this.modeInfo = modeInfo; + this.initialConnectionTimeout = initialConnectionTimeout; + this.skipUnavailable = skipUnavailable; + } + + public ProtobufRemoteConnectionInfo(CodedInputStream input) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(input); + ProtobufRemoteConnectionStrategy.ProtobufConnectionStrategy mode = protobufStreamInput.readEnum( + ProtobufRemoteConnectionStrategy.ProtobufConnectionStrategy.class + ); + modeInfo = mode.getReader().read(input); + initialConnectionTimeout = protobufStreamInput.readTimeValue(); + clusterAlias = input.readString(); + skipUnavailable = input.readBool(); + } + + public boolean isConnected() { + return modeInfo.isConnected(); + } + + public String getClusterAlias() { + return clusterAlias; + } + + public ModeInfo getModeInfo() { + return modeInfo; + } + + public TimeValue getInitialConnectionTimeout() { + return initialConnectionTimeout; + } + + public boolean isSkipUnavailable() { + return skipUnavailable; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeEnum(modeInfo.modeType()); + modeInfo.writeTo(out); + protobufStreamOutput.writeTimeValue(initialConnectionTimeout); + out.writeStringNoTag(clusterAlias); + out.writeBoolNoTag(skipUnavailable); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(clusterAlias); + { + builder.field("connected", modeInfo.isConnected()); + builder.field("mode", modeInfo.modeName()); + modeInfo.toXContent(builder, params); + builder.field("initial_connect_timeout", initialConnectionTimeout); + builder.field("skip_unavailable", skipUnavailable); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ProtobufRemoteConnectionInfo that = (ProtobufRemoteConnectionInfo) o; + return skipUnavailable == that.skipUnavailable + && Objects.equals(modeInfo, that.modeInfo) + && Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) + && Objects.equals(clusterAlias, that.clusterAlias); + } + + @Override + public int hashCode() { + return Objects.hash(modeInfo, initialConnectionTimeout, clusterAlias, skipUnavailable); + } + + /** + * Mode information + * + * @opensearch.internal + */ + public interface ModeInfo extends ToXContentFragment, ProtobufWriteable { + + boolean isConnected(); + + String modeName(); + + ProtobufRemoteConnectionStrategy.ProtobufConnectionStrategy modeType(); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java new file mode 100644 index 0000000000000..a8608b80768a6 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java @@ -0,0 +1,217 @@ +/* +* 
SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Manager for connecting to remote nodes +* +* @opensearch.internal +*/ +public class ProtobufRemoteConnectionManager implements ProtobufConnectionManager { + + private final String clusterAlias; + private final ProtobufConnectionManager delegate; + private final AtomicLong counter = new AtomicLong(); + private volatile List connectedNodes = Collections.emptyList(); + + ProtobufRemoteConnectionManager(String clusterAlias, ProtobufConnectionManager delegate) { + this.clusterAlias = clusterAlias; + this.delegate = delegate; + this.delegate.addListener(new ProtobufTransportConnectionListener() { + @Override + public void onNodeConnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + addConnectedNode(node); + } + + @Override + public void onNodeDisconnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + removeConnectedNode(node); + } + }); + } + + @Override + public void connectToNode( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ProtobufConnectionManager.ConnectionValidator connectionValidator, + ActionListener listener + ) throws ConnectTransportException { + delegate.connectToNode(node, connectionProfile, connectionValidator, listener); + } + + @Override + public void addListener(ProtobufTransportConnectionListener listener) { + delegate.addListener(listener); + } + + @Override + public void removeListener(ProtobufTransportConnectionListener listener) { + delegate.removeListener(listener); + } + + @Override + public void openConnection( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile profile, + ActionListener listener + ) { + delegate.openConnection(node, profile, listener); + } + + @Override + public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node) { + try { + return delegate.getConnection(node); + } catch (NodeNotConnectedException e) { + return new ProxyConnection(getAnyRemoteConnection(), node); + } + } + + @Override + public boolean nodeConnected(ProtobufDiscoveryNode node) { + return delegate.nodeConnected(node); + } + + @Override + public void disconnectFromNode(ProtobufDiscoveryNode node) { + delegate.disconnectFromNode(node); + } + + @Override + public ProtobufConnectionProfile getConnectionProfile() { + return delegate.getConnectionProfile(); + } + + public ProtobufTransport.Connection getAnyRemoteConnection() { + List localConnectedNodes = this.connectedNodes; + long curr; + while ((curr = counter.incrementAndGet()) == Long.MIN_VALUE) + ; + if (localConnectedNodes.isEmpty() == false) { + ProtobufDiscoveryNode nextNode = localConnectedNodes.get( + Math.toIntExact(Math.floorMod(curr, (long) localConnectedNodes.size())) + ); + try { + return delegate.getConnection(nextNode); + } catch (NodeNotConnectedException e) { + // Ignore. 
We will manually create an iterator of open nodes + } + } + Set allConnectionNodes = getAllConnectedNodes(); + for (ProtobufDiscoveryNode connectedNode : allConnectionNodes) { + try { + return delegate.getConnection(connectedNode); + } catch (NodeNotConnectedException e) { + // Ignore. We will try the next one until all are exhausted. + } + } + throw new NoSuchRemoteClusterException(clusterAlias); + } + + @Override + public Set getAllConnectedNodes() { + return delegate.getAllConnectedNodes(); + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public void close() { + delegate.closeNoBlock(); + } + + @Override + public void closeNoBlock() { + delegate.closeNoBlock(); + } + + private synchronized void addConnectedNode(ProtobufDiscoveryNode addedNode) { + ArrayList newConnections = new ArrayList<>(this.connectedNodes); + newConnections.add(addedNode); + this.connectedNodes = Collections.unmodifiableList(newConnections); + } + + private synchronized void removeConnectedNode(ProtobufDiscoveryNode removedNode) { + int newSize = this.connectedNodes.size() - 1; + ArrayList newConnectedNodes = new ArrayList<>(newSize); + for (ProtobufDiscoveryNode connectedNode : this.connectedNodes) { + if (connectedNode.equals(removedNode) == false) { + newConnectedNodes.add(connectedNode); + } + } + assert newConnectedNodes.size() == newSize : "Expected connection node count: " + newSize + ", Found: " + newConnectedNodes.size(); + this.connectedNodes = Collections.unmodifiableList(newConnectedNodes); + } + + static final class ProxyConnection implements ProtobufTransport.Connection { + private final ProtobufTransport.Connection connection; + private final ProtobufDiscoveryNode targetNode; + + private ProxyConnection(ProtobufTransport.Connection connection, ProtobufDiscoveryNode targetNode) { + this.connection = connection; + this.targetNode = targetNode; + } + + @Override + public ProtobufDiscoveryNode getNode() { + return targetNode; + } + + @Override + public void sendRequest(long requestId, String action, ProtobufTransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + connection.sendRequest( + requestId, + ProtobufTransportActionProxy.getProxyAction(action), + ProtobufTransportActionProxy.wrapRequest(targetNode, request), + options + ); + } + + @Override + public void close() { + assert false : "proxy connections must not be closed"; + } + + @Override + public void addCloseListener(ActionListener listener) { + connection.addCloseListener(listener); + } + + @Override + public boolean isClosed() { + return connection.isClosed(); + } + + @Override + public Version getVersion() { + return connection.getVersion(); + } + + @Override + public Object getCacheKey() { + return connection.getCacheKey(); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java new file mode 100644 index 0000000000000..0ee4507fc98a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java @@ -0,0 +1,484 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +/* +* Licensed to Elasticsearch under one or more contributor +* license agreements. 
See the NOTICE file distributed with +* this work for additional information regarding copyright +* ownership. Elasticsearch licenses this file to you under +* the Apache License, Version 2.0 (the "License"); you may +* not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. +*/ + +/* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +package org.opensearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ContextPreservingActionListener; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.threadpool.ThreadPool; + +import java.io.Closeable; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Strategy to connect to remote nodes +* +* @opensearch.internal +*/ +public abstract class ProtobufRemoteConnectionStrategy implements ProtobufTransportConnectionListener, Closeable { + + enum ProtobufConnectionStrategy { + SNIFF( + ProtobufSniffConnectionStrategy.CHANNELS_PER_CONNECTION, + ProtobufSniffConnectionStrategy::enablementSettings, + ProtobufSniffConnectionStrategy::infoReader + ) { + @Override + public String toString() { + return "sniff"; + } + }, + PROXY( + ProtobufProxyConnectionStrategy.CHANNELS_PER_CONNECTION, + ProtobufProxyConnectionStrategy::enablementSettings, + ProtobufProxyConnectionStrategy::infoReader + ) { + @Override + public String toString() { + return "proxy"; + } + }; + + private final int numberOfChannels; + private final Supplier>> enablementSettings; + private final Supplier> reader; + + ProtobufConnectionStrategy( + int numberOfChannels, + Supplier>> enablementSettings, + Supplier> reader + ) { + this.numberOfChannels = numberOfChannels; + this.enablementSettings = enablementSettings; + this.reader = reader; + } + + public int getNumberOfChannels() { + return numberOfChannels; + } + + public Supplier>> getEnablementSettings() { + return enablementSettings; + } + + public ProtobufWriteable.Reader getReader() { + return reader.get(); 
+ } + } + + public static final Setting.AffixSetting REMOTE_CONNECTION_MODE = Setting.affixKeySetting( + "cluster.remote.", + "mode", + key -> new Setting<>( + key, + ProtobufConnectionStrategy.SNIFF.name(), + value -> ProtobufConnectionStrategy.valueOf(value.toUpperCase(Locale.ROOT)), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + ); + + // this setting is intentionally not registered, it is only used in tests + public static final Setting REMOTE_MAX_PENDING_CONNECTION_LISTENERS = Setting.intSetting( + "cluster.remote.max_pending_connection_listeners", + 1000, + Setting.Property.NodeScope + ); + + private final int maxPendingConnectionListeners; + + protected final Logger logger = LogManager.getLogger(getClass()); + + private final AtomicBoolean closed = new AtomicBoolean(false); + private final Object mutex = new Object(); + private List> listeners = new ArrayList<>(); + + protected final ProtobufTransportService transportService; + protected final ProtobufRemoteConnectionManager connectionManager; + protected final String clusterAlias; + + ProtobufRemoteConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + Settings settings + ) { + this.clusterAlias = clusterAlias; + this.transportService = transportService; + this.connectionManager = connectionManager; + this.maxPendingConnectionListeners = REMOTE_MAX_PENDING_CONNECTION_LISTENERS.get(settings); + connectionManager.addListener(this); + } + + static ProtobufConnectionProfile buildConnectionProfile(String clusterAlias, Settings settings) { + ProtobufConnectionStrategy mode = REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(clusterAlias).get(settings); + ProtobufConnectionProfile.Builder builder = new ProtobufConnectionProfile.Builder().setConnectTimeout( + TransportSettings.CONNECT_TIMEOUT.get(settings) + ) + .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) + .setCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings)) + .setPingInterval(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterAlias).get(settings)) + .addConnections( + 0, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.STATE, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.PING + ) + .addConnections(mode.numberOfChannels, TransportRequestOptions.Type.REG); + return builder.build(); + } + + static ProtobufRemoteConnectionStrategy buildStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + Settings settings + ) { + ProtobufConnectionStrategy mode = REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(clusterAlias).get(settings); + switch (mode) { + case SNIFF: + return new ProtobufSniffConnectionStrategy(clusterAlias, transportService, connectionManager, settings); + case PROXY: + return new ProtobufProxyConnectionStrategy(clusterAlias, transportService, connectionManager, settings); + default: + throw new AssertionError("Invalid connection strategy" + mode); + } + } + + static Set getRemoteClusters(Settings settings) { + final Stream> enablementSettings = Arrays.stream(ProtobufConnectionStrategy.values()) + .flatMap(strategy -> strategy.getEnablementSettings().get()); + return enablementSettings.flatMap(s -> getClusterAlias(settings, s)).collect(Collectors.toSet()); + } + + public static boolean isConnectionEnabled(String 
clusterAlias, Settings settings) { + ProtobufConnectionStrategy mode = REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(clusterAlias).get(settings); + if (mode.equals(ProtobufConnectionStrategy.SNIFF)) { + List seeds = ProtobufSniffConnectionStrategy.REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace(clusterAlias) + .get(settings); + return seeds.isEmpty() == false; + } else { + String address = ProtobufProxyConnectionStrategy.PROXY_ADDRESS.getConcreteSettingForNamespace(clusterAlias).get(settings); + return Strings.isEmpty(address) == false; + } + } + + @SuppressWarnings("unchecked") + public static boolean isConnectionEnabled(String clusterAlias, Map, Object> settings) { + ProtobufConnectionStrategy mode = (ProtobufConnectionStrategy) settings.get( + REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(clusterAlias) + ); + if (mode.equals(ProtobufConnectionStrategy.SNIFF)) { + List seeds = (List) settings.get( + ProtobufSniffConnectionStrategy.REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace(clusterAlias) + ); + return seeds.isEmpty() == false; + } else { + String address = (String) settings.get( + ProtobufProxyConnectionStrategy.PROXY_ADDRESS.getConcreteSettingForNamespace(clusterAlias) + ); + return Strings.isEmpty(address) == false; + } + } + + private static Stream getClusterAlias(Settings settings, Setting.AffixSetting affixSetting) { + Stream> allConcreteSettings = affixSetting.getAllConcreteSettings(settings); + return allConcreteSettings.map(affixSetting::getNamespace); + } + + static InetSocketAddress parseConfiguredAddress(String configuredAddress) { + final String host = parseHost(configuredAddress); + final int port = parsePort(configuredAddress); + InetAddress hostAddress; + try { + hostAddress = InetAddress.getByName(host); + } catch (UnknownHostException e) { + throw new IllegalArgumentException("unknown host [" + host + "]", e); + } + return new InetSocketAddress(hostAddress, port); + } + + static String parseHost(final String configuredAddress) { + return configuredAddress.substring(0, indexOfPortSeparator(configuredAddress)); + } + + static int parsePort(String remoteHost) { + try { + int port = Integer.parseInt(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1)); + if (port <= 0) { + throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]"); + } + return port; + } catch (NumberFormatException e) { + throw new IllegalArgumentException("failed to parse port", e); + } + } + + private static int indexOfPortSeparator(String remoteHost) { + int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300 + if (portSeparator == -1 || portSeparator == remoteHost.length()) { + throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); + } + return portSeparator; + } + + /** + * Triggers a connect round unless there is one running already. If there is a connect round running, the listener will either + * be queued or rejected and failed. 
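+     * Once {@code REMOTE_MAX_PENDING_CONNECTION_LISTENERS} listeners are pending, further listeners are
+     * failed immediately with an {@code OpenSearchRejectedExecutionException}.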
+ */ + void connect(ActionListener connectListener) { + boolean runConnect = false; + final ActionListener listener = ContextPreservingActionListener.wrapPreservingContext( + connectListener, + transportService.getThreadPool().getThreadContext() + ); + boolean closed; + synchronized (mutex) { + closed = this.closed.get(); + if (closed) { + assert listeners.isEmpty(); + } else { + if (listeners.size() >= maxPendingConnectionListeners) { + assert listeners.size() == maxPendingConnectionListeners; + listener.onFailure(new OpenSearchRejectedExecutionException("connect listener queue is full")); + return; + } else { + listeners.add(listener); + } + runConnect = listeners.size() == 1; + } + } + if (closed) { + connectListener.onFailure(new AlreadyClosedException("connect handler is already closed")); + return; + } + if (runConnect) { + ExecutorService executor = transportService.getThreadPool().executor(ThreadPool.Names.MANAGEMENT); + executor.submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + ActionListener.onFailure(getAndClearListeners(), e); + } + + @Override + protected void doRun() { + connectImpl(new ActionListener() { + @Override + public void onResponse(Void aVoid) { + ActionListener.onResponse(getAndClearListeners(), aVoid); + } + + @Override + public void onFailure(Exception e) { + ActionListener.onFailure(getAndClearListeners(), e); + } + }); + } + }); + } + } + + boolean shouldRebuildConnection(Settings newSettings) { + ProtobufConnectionStrategy newMode = REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(clusterAlias).get(newSettings); + if (newMode.equals(strategyType()) == false) { + return true; + } else { + Boolean compressionEnabled = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias) + .get(newSettings); + TimeValue pingSchedule = RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterAlias) + .get(newSettings); + + ProtobufConnectionProfile oldProfile = connectionManager.getConnectionProfile(); + ProtobufConnectionProfile.Builder builder = new ProtobufConnectionProfile.Builder(oldProfile); + builder.setCompressionEnabled(compressionEnabled); + builder.setPingInterval(pingSchedule); + ProtobufConnectionProfile newProfile = builder.build(); + return connectionProfileChanged(oldProfile, newProfile) || strategyMustBeRebuilt(newSettings); + } + } + + protected abstract boolean strategyMustBeRebuilt(Settings newSettings); + + protected abstract ProtobufConnectionStrategy strategyType(); + + @Override + public void onNodeDisconnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + if (shouldOpenMoreConnections()) { + // try to reconnect and fill up the slot of the disconnected node + connect( + ActionListener.wrap( + ignore -> logger.trace("[{}] successfully connected after disconnect of {}", clusterAlias, node), + e -> logger.debug( + () -> new ParameterizedMessage("[{}] failed to connect after disconnect of {}", clusterAlias, node), + e + ) + ) + ); + } + } + + @Override + public void close() { + final List> toNotify; + synchronized (mutex) { + if (closed.compareAndSet(false, true)) { + connectionManager.removeListener(this); + toNotify = listeners; + listeners = Collections.emptyList(); + } else { + toNotify = Collections.emptyList(); + } + } + ActionListener.onFailure(toNotify, new AlreadyClosedException("connect handler is already closed")); + } + + public boolean isClosed() { + return closed.get(); + } + + // for testing only + boolean 
assertNoRunningConnections() { + synchronized (mutex) { + assert listeners.isEmpty(); + } + return true; + } + + protected abstract boolean shouldOpenMoreConnections(); + + protected abstract void connectImpl(ActionListener listener); + + protected abstract ProtobufRemoteConnectionInfo.ModeInfo getModeInfo(); + + private List> getAndClearListeners() { + final List> result; + synchronized (mutex) { + if (listeners.isEmpty()) { + result = Collections.emptyList(); + } else { + result = listeners; + listeners = new ArrayList<>(); + } + } + return result; + } + + private boolean connectionProfileChanged(ProtobufConnectionProfile oldProfile, ProtobufConnectionProfile newProfile) { + return Objects.equals(oldProfile.getCompressionEnabled(), newProfile.getCompressionEnabled()) == false + || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false; + } + + /** + * Internal strategy validation object + * + * @opensearch.internal + */ + static class StrategyValidator implements Setting.Validator { + + private final String key; + private final ProtobufConnectionStrategy expectedStrategy; + private final String namespace; + private final Consumer valueChecker; + + StrategyValidator(String namespace, String key, ProtobufConnectionStrategy expectedStrategy) { + this(namespace, key, expectedStrategy, (v) -> {}); + } + + StrategyValidator(String namespace, String key, ProtobufConnectionStrategy expectedStrategy, Consumer valueChecker) { + this.namespace = namespace; + this.key = key; + this.expectedStrategy = expectedStrategy; + this.valueChecker = valueChecker; + } + + @Override + public void validate(T value) { + valueChecker.accept(value); + } + + @Override + public void validate(T value, Map, Object> settings, boolean isPresent) { + Setting concrete = REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(namespace); + ProtobufConnectionStrategy modeType = (ProtobufConnectionStrategy) settings.get(concrete); + if (isPresent && modeType.equals(expectedStrategy) == false) { + throw new IllegalArgumentException( + "Setting \"" + + key + + "\" cannot be used with the configured \"" + + concrete.getKey() + + "\" [required=" + + expectedStrategy.name() + + ", configured=" + + modeType.name() + + "]" + ); + } + } + + @Override + public Iterator> settings() { + Setting concrete = REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(namespace); + Stream> settingStream = Stream.of(concrete); + return settingStream.iterator(); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteTransportException.java new file mode 100644 index 0000000000000..b720a95e19264 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteTransportException.java @@ -0,0 +1,42 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.OpenSearchWrapperException; +import org.opensearch.common.transport.ProtobufTransportAddress; + +import java.io.IOException; + +/** + * A remote exception for an action. A wrapper exception around the actual remote cause and does not fill the +* stack trace. 
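+* Callers that need the underlying remote failure should therefore inspect {@code getCause()}.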
+* +* @opensearch.internal +*/ +public class ProtobufRemoteTransportException extends ProtobufActionTransportException implements OpenSearchWrapperException { + + public ProtobufRemoteTransportException(String msg, Throwable cause) { + super(msg, null, null, cause); + } + + public ProtobufRemoteTransportException(String name, ProtobufTransportAddress address, String action, Throwable cause) { + super(name, address, action, cause); + } + + public ProtobufRemoteTransportException(CodedInputStream in) throws IOException { + super(in); + } + + @Override + public Throwable fillInStackTrace() { + // no need for stack trace here, we always have cause + return this; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java index 1ddeb04f106a8..6e4fdbfc4f053 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java @@ -8,11 +8,10 @@ package org.opensearch.transport; -import org.opensearch.common.io.stream.StreamInput; +import com.google.protobuf.CodedInputStream; import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; -import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.ProtobufCancellableTask; import org.opensearch.tasks.ProtobufTask; @@ -57,24 +56,24 @@ public String getAction() { return action; } - public Request newRequest(StreamInput in) throws IOException { + public Request newRequest(CodedInputStream in) throws IOException { return requestReader.read(in); } - public void processMessageReceived(Request request, TransportChannel channel) throws Exception { + public void processMessageReceived(Request request, ProtobufTransportChannel channel) throws Exception { final ProtobufTask task = taskManager.register(channel.getChannelType(), action, request); ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task); Releasable unregisterTask = () -> taskManager.unregister(task); try { - if (channel instanceof TcpTransportChannel && task instanceof ProtobufCancellableTask) { + if (channel instanceof ProtobufTcpTransportChannel && task instanceof ProtobufCancellableTask) { // if (request instanceof ShardSearchRequest) { - // // on receiving request, update the inbound network time to reflect time spent in transit over the network - // ((ShardSearchRequest) request).setInboundNetworkTime( - // Math.max(0, System.currentTimeMillis() - ((ShardSearchRequest) request).getInboundNetworkTime()) - // ); + // // on receiving request, update the inbound network time to reflect time spent in transit over the network + // ((ShardSearchRequest) request).setInboundNetworkTime( + // Math.max(0, System.currentTimeMillis() - ((ShardSearchRequest) request).getInboundNetworkTime()) + // ); // } - final TcpChannel tcpChannel = ((TcpTransportChannel) channel).getChannel(); + final TcpChannel tcpChannel = ((ProtobufTcpTransportChannel) channel).getChannel(); final Releasable stopTracking = taskManager.startTrackingCancellableChannelTask(tcpChannel, (ProtobufCancellableTask) task); unregisterTask = Releasables.wrap(unregisterTask, stopTracking); } @@ -108,11 +107,11 @@ public String toString() { return handler.toString(); } - public static RequestHandlerRegistry 
replaceHandler( - RequestHandlerRegistry registry, + public static ProtobufRequestHandlerRegistry replaceHandler( + ProtobufRequestHandlerRegistry registry, ProtobufTransportRequestHandler handler ) { - return new RequestHandlerRegistry<>( + return new ProtobufRequestHandlerRegistry<>( registry.action, registry.requestReader, registry.taskManager, diff --git a/server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java new file mode 100644 index 0000000000000..eb1e1340230c4 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java @@ -0,0 +1,31 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.OpenSearchWrapperException; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; + +import java.io.IOException; + +/** + * Thrown when an error occurs while sending a request +* +* @opensearch.internal +*/ +public class ProtobufSendRequestTransportException extends ProtobufActionTransportException implements OpenSearchWrapperException { + + public ProtobufSendRequestTransportException(ProtobufDiscoveryNode node, String action, Throwable cause) { + super(node == null ? null : node.getName(), node == null ? null : node.getAddress(), action, cause); + } + + public ProtobufSendRequestTransportException(CodedInputStream in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java new file mode 100644 index 0000000000000..36c851592a6fd --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java @@ -0,0 +1,628 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.admin.cluster.state.ClusterStateAction; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.Booleans; +import org.opensearch.common.SetOnce; +import org.opensearch.common.Strings; +import org.opensearch.common.UUIDs; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.threadpool.ProtobufThreadPool; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.common.settings.Setting.intSetting; + +/** + * Sniff for initial seed nodes +* +* @opensearch.internal +*/ +public class ProtobufSniffConnectionStrategy extends ProtobufRemoteConnectionStrategy { + + /** + * A list of initial seed nodes to discover eligible nodes from the remote cluster + */ + public static final Setting.AffixSetting> REMOTE_CLUSTER_SEEDS = Setting.affixKeySetting( + "cluster.remote.", + "seeds", + (ns, key) -> Setting.listSetting(key, Collections.emptyList(), s -> { + // validate seed address + parsePort(s); + return s; + }, new StrategyValidator<>(ns, key, ProtobufConnectionStrategy.SNIFF), Setting.Property.Dynamic, Setting.Property.NodeScope) + ); + + /** + * A proxy address for the remote cluster. By default this is not set, meaning that OpenSearch will connect directly to the nodes in + * the remote cluster using their publish addresses. If this setting is set to an IP address or hostname then OpenSearch will connect + * to the nodes in the remote cluster using this address instead. Use of this setting is not recommended and it is deliberately + * undocumented as it does not work well with all proxies. + */ + public static final Setting.AffixSetting REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( + "cluster.remote.", + "proxy", + (ns, key) -> Setting.simpleString(key, new StrategyValidator<>(ns, key, ProtobufConnectionStrategy.SNIFF, s -> { + if (Strings.hasLength(s)) { + parsePort(s); + } + }), Setting.Property.Dynamic, Setting.Property.NodeScope), + () -> REMOTE_CLUSTER_SEEDS + ); + + /** + * The maximum number of connections that will be established to a remote cluster. 
For instance if there is only a single + * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. + */ + public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = intSetting( + "cluster.remote.connections_per_cluster", + 3, + 1, + Setting.Property.NodeScope + ); + /** + * The maximum number of node connections that will be established to a remote cluster. For instance if there is only a single + * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. + */ + public static final Setting.AffixSetting REMOTE_NODE_CONNECTIONS = Setting.affixKeySetting( + "cluster.remote.", + "node_connections", + (ns, key) -> intSetting( + key, + REMOTE_CONNECTIONS_PER_CLUSTER, + 1, + new StrategyValidator<>(ns, key, ProtobufConnectionStrategy.SNIFF), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ) + ); + + static final int CHANNELS_PER_CONNECTION = 6; + + private static final Predicate DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion()) + && (node.isClusterManagerNode() == false || node.isDataNode() || node.isIngestNode()); + + private final List configuredSeedNodes; + private final List> seedNodes; + private final int maxNumRemoteConnections; + private final Predicate nodePredicate; + private final SetOnce remoteClusterName = new SetOnce<>(); + private final String proxyAddress; + + ProtobufSniffConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + Settings settings + ) { + this( + clusterAlias, + transportService, + connectionManager, + REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterAlias).get(settings), + settings, + REMOTE_NODE_CONNECTIONS.getConcreteSettingForNamespace(clusterAlias).get(settings), + getNodePredicate(settings), + REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace(clusterAlias).get(settings) + ); + } + + ProtobufSniffConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + String proxyAddress, + Settings settings, + int maxNumRemoteConnections, + Predicate nodePredicate, + List configuredSeedNodes + ) { + this( + clusterAlias, + transportService, + connectionManager, + proxyAddress, + settings, + maxNumRemoteConnections, + nodePredicate, + configuredSeedNodes, + configuredSeedNodes.stream() + .map(seedAddress -> (Supplier) () -> resolveSeedNode(clusterAlias, seedAddress, proxyAddress)) + .collect(Collectors.toList()) + ); + } + + ProtobufSniffConnectionStrategy( + String clusterAlias, + ProtobufTransportService transportService, + ProtobufRemoteConnectionManager connectionManager, + String proxyAddress, + Settings settings, + int maxNumRemoteConnections, + Predicate nodePredicate, + List configuredSeedNodes, + List> seedNodes + ) { + super(clusterAlias, transportService, connectionManager, settings); + this.proxyAddress = proxyAddress; + this.maxNumRemoteConnections = maxNumRemoteConnections; + this.nodePredicate = nodePredicate; + this.configuredSeedNodes = configuredSeedNodes; + this.seedNodes = seedNodes; + } + + static Stream> enablementSettings() { + return Stream.of(ProtobufSniffConnectionStrategy.REMOTE_CLUSTER_SEEDS); + } + + static ProtobufWriteable.Reader infoReader() { + return SniffModeInfo::new; + } + + @Override + protected boolean shouldOpenMoreConnections() { + return connectionManager.size() < maxNumRemoteConnections; + } + + @Override 
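+    // A rebuild is needed when the proxy address, the seed list or the per-cluster node connection count changes.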
+ protected boolean strategyMustBeRebuilt(Settings newSettings) { + String proxy = REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterAlias).get(newSettings); + List addresses = REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace(clusterAlias).get(newSettings); + int nodeConnections = REMOTE_NODE_CONNECTIONS.getConcreteSettingForNamespace(clusterAlias).get(newSettings); + return nodeConnections != maxNumRemoteConnections + || seedsChanged(configuredSeedNodes, addresses) + || proxyChanged(proxyAddress, proxy); + } + + @Override + protected ProtobufConnectionStrategy strategyType() { + return ProtobufConnectionStrategy.SNIFF; + } + + @Override + protected void connectImpl(ActionListener listener) { + collectRemoteNodes(seedNodes.iterator(), listener); + } + + @Override + protected ProtobufRemoteConnectionInfo.ModeInfo getModeInfo() { + return new SniffModeInfo(configuredSeedNodes, maxNumRemoteConnections, connectionManager.size()); + } + + private void collectRemoteNodes(Iterator> seedNodes, ActionListener listener) { + if (Thread.currentThread().isInterrupted()) { + listener.onFailure(new InterruptedException("remote connect thread got interrupted")); + return; + } + + if (seedNodes.hasNext()) { + final Consumer onFailure = e -> { + if (e instanceof ConnectTransportException || e instanceof IOException || e instanceof IllegalStateException) { + // ISE if we fail the handshake with an version incompatible node + if (seedNodes.hasNext()) { + logger.debug( + () -> new ParameterizedMessage( + "fetching nodes from external cluster [{}] failed moving to next seed node", + clusterAlias + ), + e + ); + collectRemoteNodes(seedNodes, listener); + return; + } + } + logger.warn(new ParameterizedMessage("fetching nodes from external cluster [{}] failed", clusterAlias), e); + listener.onFailure(e); + }; + + final ProtobufDiscoveryNode seedNode = seedNodes.next().get(); + logger.trace("[{}] opening transient connection to seed node: [{}]", clusterAlias, seedNode); + final StepListener openConnectionStep = new StepListener<>(); + try { + connectionManager.openConnection(seedNode, null, openConnectionStep); + } catch (Exception e) { + onFailure.accept(e); + } + + final StepListener handshakeStep = new StepListener<>(); + openConnectionStep.whenComplete(connection -> { + ProtobufConnectionProfile connectionProfile = connectionManager.getConnectionProfile(); + transportService.handshake( + connection, + connectionProfile.getHandshakeTimeout().millis(), + getRemoteClusterNamePredicate(), + handshakeStep + ); + }, onFailure); + + final StepListener fullConnectionStep = new StepListener<>(); + handshakeStep.whenComplete(handshakeResponse -> { + final ProtobufDiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode(); + + if (nodePredicate.test(handshakeNode) && shouldOpenMoreConnections()) { + logger.trace( + "[{}] opening managed connection to seed node: [{}] proxy address: [{}]", + clusterAlias, + handshakeNode, + proxyAddress + ); + final ProtobufDiscoveryNode handshakeNodeWithProxy = maybeAddProxyAddress(proxyAddress, handshakeNode); + connectionManager.connectToNode( + handshakeNodeWithProxy, + null, + transportService.connectionValidator(handshakeNodeWithProxy), + fullConnectionStep + ); + } else { + fullConnectionStep.onResponse(null); + } + }, e -> { + final ProtobufTransport.Connection connection = openConnectionStep.result(); + final ProtobufDiscoveryNode node = connection.getNode(); + logger.debug(() -> new ParameterizedMessage("[{}] failed to handshake with seed node: [{}]", 
clusterAlias, node), e); + IOUtils.closeWhileHandlingException(connection); + onFailure.accept(e); + }); + + fullConnectionStep.whenComplete(aVoid -> { + if (remoteClusterName.get() == null) { + ProtobufTransportService.HandshakeResponse handshakeResponse = handshakeStep.result(); + assert handshakeResponse.getClusterName().value() != null; + remoteClusterName.set(handshakeResponse.getClusterName()); + } + final ProtobufTransport.Connection connection = openConnectionStep.result(); + + ProtobufClusterStateRequest request = new ProtobufClusterStateRequest(); + request.clear(); + request.nodes(true); + // here we pass on the connection since we can only close it once sendRequest returns; otherwise, + // due to the async nature (it will return before it's actually sent), this can cause the request to fail + // due to an already closed connection. + ProtobufThreadPool threadPool = transportService.getThreadPool(); + ThreadContext threadContext = threadPool.getThreadContext(); + ProtobufTransportService.ContextRestoreResponseHandler<ProtobufClusterStateResponse> responseHandler = + new ProtobufTransportService.ContextRestoreResponseHandler<>( + threadContext.newRestorableContext(false), + new SniffClusterStateResponseHandler(connection, listener, seedNodes) + ); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any + // existing context information. + threadContext.markAsSystemContext(); + transportService.sendRequest( + connection, + ClusterStateAction.NAME, + request, + TransportRequestOptions.EMPTY, + responseHandler + ); + } + }, e -> { + final ProtobufTransport.Connection connection = openConnectionStep.result(); + final ProtobufDiscoveryNode node = connection.getNode(); + logger.debug( + () -> new ParameterizedMessage("[{}] failed to open managed connection to seed node: [{}]", clusterAlias, node), + e + ); + IOUtils.closeWhileHandlingException(openConnectionStep.result()); + onFailure.accept(e); + }); + } else { + listener.onFailure(new NoSeedNodeLeftException(clusterAlias)); + } + } + + /* This class handles the _state response from the remote cluster when sniffing nodes to connect to */ + private class SniffClusterStateResponseHandler implements ProtobufTransportResponseHandler<ProtobufClusterStateResponse> { + + private final ProtobufTransport.Connection connection; + private final ActionListener<Void> listener; + private final Iterator<Supplier<ProtobufDiscoveryNode>> seedNodes; + + SniffClusterStateResponseHandler( + ProtobufTransport.Connection connection, + ActionListener<Void> listener, + Iterator<Supplier<ProtobufDiscoveryNode>> seedNodes + ) { + this.connection = connection; + this.listener = listener; + this.seedNodes = seedNodes; + } + + @Override + public ProtobufClusterStateResponse read(CodedInputStream in) throws IOException { + return new ProtobufClusterStateResponse(in); + } + + @Override + public void handleResponse(ProtobufClusterStateResponse response) { + handleNodes(response.getState().nodes().getNodes().valuesIt()); + } + + private void handleNodes(Iterator<ProtobufDiscoveryNode> nodesIter) { + while (nodesIter.hasNext()) { + final ProtobufDiscoveryNode node = nodesIter.next(); + if (nodePredicate.test(node) && shouldOpenMoreConnections()) { + logger.trace("[{}] opening managed connection to node: [{}] proxy address: [{}]", clusterAlias, node, proxyAddress); + final ProtobufDiscoveryNode nodeWithProxy = maybeAddProxyAddress(proxyAddress, node); + connectionManager.connectToNode( + nodeWithProxy, + null, + transportService.connectionValidator(node), + new ActionListener<Void>() { + @Override + public void 
onResponse(Void aVoid) { + handleNodes(nodesIter); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ConnectTransportException || e instanceof IllegalStateException) { + // ISE if we fail the handshake with a version-incompatible node; + // fair enough, we can't connect, just move on + logger.debug( + () -> new ParameterizedMessage( + "[{}] failed to open managed connection to node [{}]", + clusterAlias, + node + ), + e + ); + handleNodes(nodesIter); + } else { + logger.warn( + new ParameterizedMessage("[{}] failed to open managed connection to node [{}]", clusterAlias, node), + e + ); + IOUtils.closeWhileHandlingException(connection); + collectRemoteNodes(seedNodes, listener); + } + } + } + ); + return; + } + } + // We have to close this connection before we notify listeners - this is mainly needed for test correctness + // since if we do it afterwards we might fail assertions that check if all high-level connections are closed. + // From a code-correctness perspective we could also close it afterwards. + IOUtils.closeWhileHandlingException(connection); + int openConnections = connectionManager.size(); + if (openConnections == 0) { + listener.onFailure(new IllegalStateException("Unable to open any connections to remote cluster [" + clusterAlias + "]")); + } else { + listener.onResponse(null); + } + } + + @Override + public void handleException(ProtobufTransportException exp) { + logger.warn(new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), exp); + try { + IOUtils.closeWhileHandlingException(connection); + } finally { + // once the connection is closed, let's try the next node + collectRemoteNodes(seedNodes, listener); + } + } + + @Override + public String executor() { + return ProtobufThreadPool.Names.MANAGEMENT; + } + } + + private Predicate<ProtobufClusterName> getRemoteClusterNamePredicate() { + return new Predicate<ProtobufClusterName>() { + @Override + public boolean test(ProtobufClusterName c) { + return remoteClusterName.get() == null + ? 
"any cluster name" + : "expected remote cluster name [" + remoteClusterName.get().value() + "]"; + } + }; + } + + private static ProtobufDiscoveryNode resolveSeedNode(String clusterAlias, String address, String proxyAddress) { + if (proxyAddress == null || proxyAddress.isEmpty()) { + ProtobufTransportAddress transportAddress = new ProtobufTransportAddress(parseConfiguredAddress(address)); + return new ProtobufDiscoveryNode( + clusterAlias + "#" + transportAddress.toString(), + transportAddress, + Version.CURRENT.minimumCompatibilityVersion() + ); + } else { + ProtobufTransportAddress transportAddress = new ProtobufTransportAddress(parseConfiguredAddress(proxyAddress)); + String hostName = ProtobufRemoteConnectionStrategy.parseHost(proxyAddress); + return new ProtobufDiscoveryNode( + "", + clusterAlias + "#" + address, + UUIDs.randomBase64UUID(), + hostName, + address, + transportAddress, + Collections.singletonMap("server_name", hostName), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT.minimumCompatibilityVersion() + ); + } + } + + // Default visibility for tests + static Predicate<ProtobufDiscoveryNode> getNodePredicate(Settings settings) { + if (RemoteClusterService.REMOTE_NODE_ATTRIBUTE.exists(settings)) { + // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for cross-cluster search + String attribute = RemoteClusterService.REMOTE_NODE_ATTRIBUTE.get(settings); + return DEFAULT_NODE_PREDICATE.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false"))); + } + return DEFAULT_NODE_PREDICATE; + } + + private static ProtobufDiscoveryNode maybeAddProxyAddress(String proxyAddress, ProtobufDiscoveryNode node) { + if (proxyAddress == null || proxyAddress.isEmpty()) { + return node; + } else { + // resolve the proxy address lazily here + InetSocketAddress proxyInetAddress = parseConfiguredAddress(proxyAddress); + return new ProtobufDiscoveryNode( + node.getName(), + node.getId(), + node.getEphemeralId(), + node.getHostName(), + node.getHostAddress(), + new ProtobufTransportAddress(proxyInetAddress), + node.getAttributes(), + node.getRoles(), + node.getVersion() + ); + } + } + + private boolean seedsChanged(final List<String> oldSeedNodes, final List<String> newSeedNodes) { + if (oldSeedNodes.size() != newSeedNodes.size()) { + return true; + } + Set<String> oldSeeds = new HashSet<>(oldSeedNodes); + Set<String> newSeeds = new HashSet<>(newSeedNodes); + return oldSeeds.equals(newSeeds) == false; + } + + private boolean proxyChanged(String oldProxy, String newProxy) { + if (oldProxy == null || oldProxy.isEmpty()) { + return (newProxy == null || newProxy.isEmpty()) == false; + } + + return Objects.equals(oldProxy, newProxy) == false; + } + + /** + * Information about the sniff mode + * + * @opensearch.internal + */ + public static class SniffModeInfo implements ProtobufRemoteConnectionInfo.ModeInfo { + + final List<String> seedNodes; + final int maxConnectionsPerCluster; + final int numNodesConnected; + + public SniffModeInfo(List<String> seedNodes, int maxConnectionsPerCluster, int numNodesConnected) { + this.seedNodes = seedNodes; + this.maxConnectionsPerCluster = maxConnectionsPerCluster; + this.numNodesConnected = numNodesConnected; + } + + private SniffModeInfo(CodedInputStream input) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(input); + seedNodes = Arrays.asList(protobufStreamInput.readStringArray()); + maxConnectionsPerCluster = protobufStreamInput.readVInt(); + numNodesConnected = protobufStreamInput.readVInt(); + } + + @Override + public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("seeds"); + for (String address : seedNodes) { + builder.value(address); + } + builder.endArray(); + builder.field("num_nodes_connected", numNodesConnected); + builder.field("max_connections_per_cluster", maxConnectionsPerCluster); + return builder; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeStringArray(seedNodes.toArray(new String[0])); + out.writeInt32NoTag(maxConnectionsPerCluster); + out.writeInt32NoTag(numNodesConnected); + } + + @Override + public boolean isConnected() { + return numNodesConnected > 0; + } + + @Override + public String modeName() { + return "sniff"; + } + + public List getSeedNodes() { + return seedNodes; + } + + public int getMaxConnectionsPerCluster() { + return maxConnectionsPerCluster; + } + + public int getNumNodesConnected() { + return numNodesConnected; + } + + @Override + public ProtobufRemoteConnectionStrategy.ProtobufConnectionStrategy modeType() { + return ProtobufRemoteConnectionStrategy.ProtobufConnectionStrategy.SNIFF; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SniffModeInfo sniff = (SniffModeInfo) o; + return maxConnectionsPerCluster == sniff.maxConnectionsPerCluster + && numNodesConnected == sniff.numNodesConnected + && Objects.equals(seedNodes, sniff.seedNodes); + } + + @Override + public int hashCode() { + return Objects.hash(seedNodes, maxConnectionsPerCluster, numNodesConnected); + } + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java index 81aa03275d263..03daaec9409a9 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTcpTransportChannel.java @@ -10,8 +10,6 @@ import org.opensearch.Version; import org.opensearch.common.lease.Releasable; -import org.opensearch.search.query.QuerySearchResult; - import java.io.IOException; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -24,7 +22,7 @@ public final class ProtobufTcpTransportChannel implements ProtobufTransportChannel { private final AtomicBoolean released = new AtomicBoolean(); - private final OutboundHandler outboundHandler; + private final ProtobufOutboundHandler outboundHandler; private final TcpChannel channel; private final String action; private final long requestId; @@ -35,7 +33,7 @@ public final class ProtobufTcpTransportChannel implements ProtobufTransportChann private final Releasable breakerRelease; ProtobufTcpTransportChannel( - OutboundHandler outboundHandler, + ProtobufOutboundHandler outboundHandler, TcpChannel channel, String action, long requestId, @@ -65,8 +63,8 @@ public String getProfileName() { public void sendResponse(ProtobufTransportResponse response) throws IOException { try { // if (response instanceof QuerySearchResult && ((QuerySearchResult) response).getShardSearchRequest() != null) { - // // update outbound network time with current time before sending response over network - // ((QuerySearchResult) response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); + // // update outbound network time with current time before sending response over 
network + // ((QuerySearchResult) response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); // } outboundHandler.sendResponse(version, features, channel, requestId, action, response, compressResponse, isHandshake); } finally { diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransport.java b/server/src/main/java/org/opensearch/transport/ProtobufTransport.java index 08d0ad17de49c..9a9890255fa67 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransport.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransport.java @@ -10,11 +10,11 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.component.LifecycleComponent; import org.opensearch.common.transport.ProtobufBoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; @@ -39,7 +39,7 @@ public interface ProtobufTransport extends LifecycleComponent { /** * Registers a new request handler */ - default void registerRequestHandler(RequestHandlerRegistry reg) { + default void registerRequestHandler(ProtobufRequestHandlerRegistry reg) { getRequestHandlers().registerHandler(reg); } @@ -65,7 +65,7 @@ default boolean isSecure() { /** * Returns an address from its string representation. */ - TransportAddress[] addressesFromString(String address) throws UnknownHostException; + ProtobufTransportAddress[] addressesFromString(String address) throws UnknownHostException; /** * Returns a list of all local addresses for this transport @@ -76,7 +76,11 @@ default boolean isSecure() { * Opens a new connection to the given node. When the connection is fully connected, the listener is called. * The ActionListener will be called on the calling thread or the generic thread pool. 
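Stepping back to the SniffModeInfo serialization above: its writeTo/read pair is ultimately a sequence of untagged protobuf primitives. The standalone sketch below (not part of the patch) shows the same round trip against the raw CodedOutputStream/CodedInputStream API; it assumes, as ProtobufStreamOutput#writeStringArray presumably does, that the array is encoded as a varint count followed by the strings.

import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class SniffInfoRoundTrip {
    public static void main(String[] args) throws IOException {
        String[] seeds = { "10.0.0.1:9300", "10.0.0.2:9300" };

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        CodedOutputStream out = CodedOutputStream.newInstance(bytes);
        out.writeInt32NoTag(seeds.length);   // assumed count prefix for the string array
        for (String seed : seeds) {
            out.writeStringNoTag(seed);      // each seed address, untagged
        }
        out.writeInt32NoTag(3);              // maxConnectionsPerCluster
        out.writeInt32NoTag(2);              // numNodesConnected
        out.flush();

        CodedInputStream in = CodedInputStream.newInstance(bytes.toByteArray());
        List<String> readSeeds = new ArrayList<>();
        int count = in.readInt32();
        for (int i = 0; i < count; i++) {
            readSeeds.add(in.readString());
        }
        System.out.println(readSeeds + " max=" + in.readInt32() + " connected=" + in.readInt32());
    }
}

Because nothing here is tagged, reader and writer must agree on field order exactly, which is why the reading constructor consumes fields in the same order writeTo emits them.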
*/ - void openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener listener); + void openConnection( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile profile, + ActionListener listener + ); TransportStats getStats(); @@ -85,13 +89,13 @@ default boolean isSecure() { RequestHandlers getRequestHandlers(); /** - * A unidirectional connection to a {@link DiscoveryNode} + * A unidirectional connection to a {@link ProtobufDiscoveryNode} */ interface Connection extends Closeable { /** * The node this connection is associated with */ - DiscoveryNode getNode(); + ProtobufDiscoveryNode getNode(); /** * Sends the request to the node this connection is associated with @@ -101,8 +105,8 @@ interface Connection extends Closeable { * @param options request options to apply * @throws NodeNotConnectedException if the given node is not connected */ - void sendRequest(long requestId, String action, ProtobufTransportRequest request, TransportRequestOptions options) throws IOException, - TransportException; + void sendRequest(long requestId, String action, ProtobufTransportRequest request, TransportRequestOptions options) + throws IOException, TransportException; /** * The listener's {@link ActionListener#onResponse(Object)} method will be called when this @@ -138,21 +142,21 @@ default Object getCacheKey() { * This class represents a response context that encapsulates the actual response handler, the action and the connection it was * executed on. */ - final class ResponseContext { + final class ResponseContext { - private final TransportResponseHandler handler; + private final ProtobufTransportResponseHandler handler; private final Connection connection; private final String action; - ResponseContext(TransportResponseHandler handler, Connection connection, String action) { + ResponseContext(ProtobufTransportResponseHandler handler, Connection connection, String action) { this.handler = handler; this.connection = connection; this.action = action; } - public TransportResponseHandler handler() { + public ProtobufTransportResponseHandler handler() { return handler; } @@ -169,7 +173,7 @@ public String action() { * This class is a registry that allows */ final class ResponseHandlers { - private final ConcurrentMapLong> handlers = ConcurrentCollections + private final ConcurrentMapLong> handlers = ConcurrentCollections .newConcurrentMapLongWithAggressiveConcurrency(); private final AtomicLong requestIdGenerator = new AtomicLong(); @@ -184,7 +188,7 @@ public boolean contains(long requestId) { * Removes and return the {@link ResponseContext} for the given request ID or returns * null if no context is associated with this request ID. 
*/ - public ResponseContext remove(long requestId) { + public ResponseContext remove(long requestId) { return handlers.remove(requestId); } @@ -193,9 +197,9 @@ public ResponseContext remove(long requestId) { * @return the new request ID * @see Connection#sendRequest(long, String, ProtobufTransportRequest, TransportRequestOptions) */ - public long add(ResponseContext holder) { + public long add(ResponseContext holder) { long requestId = newRequestId(); - ResponseContext existing = handlers.put(requestId, holder); + ResponseContext existing = handlers.put(requestId, holder); assert existing == null : "request ID already in use: " + requestId; return requestId; } @@ -211,12 +215,14 @@ long newRequestId() { /** * Removes and returns all {@link ResponseContext} instances that match the predicate */ - public List> prune(Predicate> predicate) { - final List> holders = new ArrayList<>(); - for (Map.Entry> entry : handlers.entrySet()) { - ResponseContext holder = entry.getValue(); + public List> prune( + Predicate> predicate + ) { + final List> holders = new ArrayList<>(); + for (Map.Entry> entry : handlers.entrySet()) { + ResponseContext holder = entry.getValue(); if (predicate.test(holder)) { - ResponseContext remove = handlers.remove(entry.getKey()); + ResponseContext remove = handlers.remove(entry.getKey()); if (remove != null) { holders.add(holder); } @@ -230,11 +236,11 @@ public List> prune(Predicate onResponseReceived( + public ProtobufTransportResponseHandler onResponseReceived( final long requestId, final ProtobufTransportMessageListener listener ) { - ResponseContext context = handlers.remove(requestId); + ResponseContext context = handlers.remove(requestId); listener.onResponseReceived(requestId, context); if (context == null) { return null; @@ -251,9 +257,10 @@ public TransportResponseHandler onResponseReceived( */ final class RequestHandlers { - private volatile Map> requestHandlers = Collections.emptyMap(); + private volatile Map> requestHandlers = Collections + .emptyMap(); - synchronized void registerHandler(RequestHandlerRegistry reg) { + synchronized void registerHandler(ProtobufRequestHandlerRegistry reg) { if (requestHandlers.containsKey(reg.getAction())) { throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); } @@ -262,13 +269,13 @@ synchronized void registerHandler(Req // TODO: Only visible for testing. 
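RequestHandlers, by contrast, is read-hot and write-rare, so it uses a copy-on-write map: registration swaps in a new immutable map under synchronization while lookups read the volatile field lock-free. A minimal sketch of that idiom, with plain JDK types standing in for MapBuilder:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class ActionRegistry<R> {
    // reads are lock-free; writers replace the whole map under the lock
    private volatile Map<String, R> requestHandlers = Collections.emptyMap();

    synchronized void registerHandler(String action, R registry) {
        if (requestHandlers.containsKey(action)) {
            throw new IllegalArgumentException("handler for action " + action + " is already registered");
        }
        Map<String, R> copy = new HashMap<>(requestHandlers);
        copy.put(action, registry);
        requestHandlers = Collections.unmodifiableMap(copy);
    }

    R getHandler(String action) {
        return requestHandlers.get(action);
    }
}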
Perhaps move StubbableTransport from // org.opensearch.test.transport to org.opensearch.transport - public synchronized void forceRegister(RequestHandlerRegistry reg) { + public synchronized void forceRegister(ProtobufRequestHandlerRegistry reg) { requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); } @SuppressWarnings("unchecked") - public RequestHandlerRegistry getHandler(String action) { - return (RequestHandlerRegistry) requestHandlers.get(action); + public ProtobufRequestHandlerRegistry getHandler(String action) { + return (ProtobufRequestHandlerRegistry) requestHandlers.get(action); } } } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java new file mode 100644 index 0000000000000..5c0fdea673c9d --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java @@ -0,0 +1,226 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.threadpool.ProtobufThreadPool; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.function.Function; + +/** + * ProtobufTransportActionProxy allows an arbitrary action to be executed on a defined target node while the initial request is sent to a second +* node that acts as a request proxy to the target node. This is useful if a node is not directly connected to a target node but is +* connected to an intermediate node that establishes a transitive connection. 
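The proxying machinery added below reduces to two simple ideas: a proxy action is the original action name behind a well-known prefix, and a proxy request is the original request wrapped together with its target node. The naming half can be illustrated in isolation; the prefix constant is taken from the file below, the driver around it is a toy:

public class ProxyActionDemo {
    private static final String PROXY_ACTION_PREFIX = "internal:transport/proxy/";

    static String getProxyAction(String action) {
        return PROXY_ACTION_PREFIX + action;
    }

    static boolean isProxyAction(String action) {
        return action.startsWith(PROXY_ACTION_PREFIX);
    }

    static String unwrapAction(String action) {
        return action.substring(PROXY_ACTION_PREFIX.length());
    }

    public static void main(String[] args) {
        String proxied = getProxyAction("cluster:monitor/state");
        System.out.println(proxied);                // internal:transport/proxy/cluster:monitor/state
        System.out.println(isProxyAction(proxied)); // true
        System.out.println(unwrapAction(proxied));  // cluster:monitor/state
    }
}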
+* +* @opensearch.internal +*/ +public final class ProtobufTransportActionProxy { + + private ProtobufTransportActionProxy() {} // no instance + + /** + * Handler for proxy requests + * + * @opensearch.internal + */ + private static class ProxyRequestHandler implements ProtobufTransportRequestHandler { + + private final ProtobufTransportService service; + private final String action; + private final Function> responseFunction; + + ProxyRequestHandler( + ProtobufTransportService service, + String action, + Function> responseFunction + ) { + this.service = service; + this.action = action; + this.responseFunction = responseFunction; + } + + @Override + public void messageReceived(T request, ProtobufTransportChannel channel, ProtobufTask task) throws Exception { + ProtobufDiscoveryNode targetNode = request.targetNode; + ProtobufTransportRequest wrappedRequest = request.wrapped; + service.sendRequest( + targetNode, + action, + wrappedRequest, + new ProxyResponseHandler<>(channel, responseFunction.apply(wrappedRequest)) + ); + } + } + + /** + * Handler for the proxy response + * + * @opensearch.internal + */ + private static class ProxyResponseHandler implements ProtobufTransportResponseHandler { + + private final ProtobufWriteable.Reader reader; + private final ProtobufTransportChannel channel; + + ProxyResponseHandler(ProtobufTransportChannel channel, ProtobufWriteable.Reader reader) { + this.reader = reader; + this.channel = channel; + } + + @Override + public T read(CodedInputStream in) throws IOException { + return reader.read(in); + } + + @Override + public void handleResponse(T response) { + try { + channel.sendResponse(response); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + public void handleException(ProtobufTransportException exp) { + try { + channel.sendResponse(exp); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + public String executor() { + return ProtobufThreadPool.Names.SAME; + } + } + + /** + * The proxy request + * + * @opensearch.internal + */ + static class ProxyRequest extends ProtobufTransportRequest { + final T wrapped; + final ProtobufDiscoveryNode targetNode; + + ProxyRequest(T wrapped, ProtobufDiscoveryNode targetNode) { + this.wrapped = wrapped; + this.targetNode = targetNode; + } + + ProxyRequest(CodedInputStream in, ProtobufWriteable.Reader reader) throws IOException { + super(in); + targetNode = new ProtobufDiscoveryNode(in); + wrapped = reader.read(in); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + targetNode.writeTo(out); + wrapped.writeTo(out); + } + } + + /** + * Registers a proxy request handler that allows to forward requests for the given action to another node. To be used when the + * response type changes based on the upcoming request (quite rare) + */ + public static void registerProxyActionWithDynamicResponseType( + ProtobufTransportService service, + String action, + Function> responseFunction + ) { + ProtobufRequestHandlerRegistry requestHandler = service.getRequestHandler(action); + service.registerRequestHandler( + getProxyAction(action), + ProtobufThreadPool.Names.SAME, + true, + false, + in -> new ProxyRequest<>(in, requestHandler::newRequest), + new ProxyRequestHandler<>(service, action, responseFunction) + ); + } + + /** + * Registers a proxy request handler that allows to forward requests for the given action to another node. To be used when the + * response type is always the same (most of the cases). 
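ProxyResponseHandler above is essentially a relay: whatever comes back from the target node is written straight back onto the channel the proxy request arrived on, with IOException rethrown unchecked to satisfy the handler contract. The shape of that relay, with toy stand-ins for the transport types:

import java.io.IOException;
import java.io.UncheckedIOException;

interface ReplyChannel<T> {
    void sendResponse(T response) throws IOException;
}

final class RelayHandler<T> {
    private final ReplyChannel<T> channel;

    RelayHandler(ReplyChannel<T> channel) {
        this.channel = channel;
    }

    void handleResponse(T response) {
        try {
            channel.sendResponse(response); // forward to the original caller
        } catch (IOException e) {
            throw new UncheckedIOException(e); // handler methods cannot throw checked exceptions
        }
    }
}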
+ */ + public static void registerProxyAction( + ProtobufTransportService service, + String action, + ProtobufWriteable.Reader reader + ) { + ProtobufRequestHandlerRegistry requestHandler = service.getRequestHandler(action); + service.registerRequestHandler( + getProxyAction(action), + ProtobufThreadPool.Names.SAME, + true, + false, + in -> new ProxyRequest<>(in, requestHandler::newRequest), + new ProxyRequestHandler<>(service, action, request -> reader) + ); + } + + private static final String PROXY_ACTION_PREFIX = "internal:transport/proxy/"; + + /** + * Returns the corresponding proxy action for the given action + */ + public static String getProxyAction(String action) { + return PROXY_ACTION_PREFIX + action; + } + + /** + * Wraps the actual request in a proxy request object that encodes the target node. + */ + public static ProtobufTransportRequest wrapRequest(ProtobufDiscoveryNode node, ProtobufTransportRequest request) { + return new ProxyRequest<>(request, node); + } + + /** + * Unwraps a proxy request and returns the original request + */ + public static ProtobufTransportRequest unwrapRequest(ProtobufTransportRequest request) { + if (request instanceof ProxyRequest) { + return ((ProxyRequest) request).wrapped; + } + return request; + } + + /** + * Unwraps a proxy action and returns the underlying action + */ + public static String unwrapAction(String action) { + assert isProxyAction(action) : "Attempted to unwrap non-proxy action: " + action; + return action.substring(PROXY_ACTION_PREFIX.length()); + } + + /** + * Returns true iff the given action is a proxy action + */ + public static boolean isProxyAction(String action) { + return action.startsWith(PROXY_ACTION_PREFIX); + } + + /** + * Returns true iff the given request is a proxy request + */ + public static boolean isProxyRequest(ProtobufTransportRequest request) { + return request instanceof ProxyRequest; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java new file mode 100644 index 0000000000000..2f348b72e1a5a --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java @@ -0,0 +1,43 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import org.opensearch.cluster.node.ProtobufDiscoveryNode; + +/** + * A listener interface that allows reacting to transport events. All methods may be +* executed on network threads. Consumers must fork in the case of long-running or blocking +* operations. +* +* @opensearch.internal +*/ +public interface ProtobufTransportConnectionListener { + + /** + * Called once a connection was opened + * @param connection the connection + */ + default void onConnectionOpened(ProtobufTransport.Connection connection) {} + + /** + * Called once a connection was closed. + * @param connection the closed connection + */ + default void onConnectionClosed(ProtobufTransport.Connection connection) {} + + /** + * Called once a node connection is opened and registered. + */ + default void onNodeConnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) {} + + /** + * Called once a node connection is closed and unregistered. 
+ */ + default void onNodeDisconnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) {} +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportException.java new file mode 100644 index 0000000000000..42bca65d8c1fd --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportException.java @@ -0,0 +1,37 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.ProtobufOpenSearchException; + +import java.io.IOException; + +/** + * Thrown for any transport errors +* +* @opensearch.internal +*/ +public class ProtobufTransportException extends ProtobufOpenSearchException { + public ProtobufTransportException(Throwable cause) { + super(cause); + } + + public ProtobufTransportException(CodedInputStream in) throws IOException { + super(in); + } + + public ProtobufTransportException(String msg) { + super(msg); + } + + public ProtobufTransportException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java new file mode 100644 index 0000000000000..2476882515de1 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java @@ -0,0 +1,59 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.transport; + +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.common.io.stream.ProtobufWriteable.Reader; + +/** + * This interface allows plugins to intercept requests on both the sender and the receiver side. +* +* @opensearch.internal +*/ +public interface ProtobufTransportInterceptor { + /** + * This is called for each handler that is registered via + * {@link ProtobufTransportService#registerRequestHandler(String, String, boolean, boolean, Reader, ProtobufTransportRequestHandler)} or + * {@link ProtobufTransportService#registerRequestHandler(String, String, Reader, ProtobufTransportRequestHandler)}. The returned handler is + * used instead of the passed in handler. By default the provided handler is returned. + */ + default ProtobufTransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + ProtobufTransportRequestHandler actualHandler + ) { + return actualHandler; + } + + /** + * This is called up-front providing the actual low level {@link AsyncSender} that performs the low level send request. + * The returned sender is used to send all requests that come in via + * {@link ProtobufTransportService#sendRequest(ProtobufDiscoveryNode, String, ProtobufTransportRequest, ProtobufTransportResponseHandler)} or + * {@link ProtobufTransportService#sendRequest(ProtobufDiscoveryNode, String, ProtobufTransportRequest, TransportRequestOptions, ProtobufTransportResponseHandler)}. + * This allows plugins to perform actions on each send request including modifying the request context etc. 
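interceptSender is the classic decorator hook: a plugin receives the real low-level sender and returns a wrapper that sees every outgoing request. A toy sketch of such a decorator (names are stand-ins, not the patch's AsyncSender signature):

interface Sender {
    void send(String action, Object request);
}

final class LoggingSender implements Sender {
    private final Sender delegate;

    LoggingSender(Sender delegate) {
        this.delegate = delegate;
    }

    @Override
    public void send(String action, Object request) {
        System.out.println("sending " + action); // observe (or mutate context) here
        delegate.send(action, request);          // then always hand off to the real sender
    }
}

A chain of interceptors composes naturally: each interceptSender call wraps the sender returned by the previous one.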
+ */ + default AsyncSender interceptSender(AsyncSender sender) { + return sender; + } + + /** + * A simple interface to decorate + * {@link #sendRequest(ProtobufTransport.Connection, String, ProtobufTransportRequest, TransportRequestOptions, ProtobufTransportResponseHandler)} + */ + interface AsyncSender { + void sendRequest( + ProtobufTransport.Connection connection, + String action, + ProtobufTransportRequest request, + TransportRequestOptions options, + ProtobufTransportResponseHandler handler + ); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java index 6eb9b7fd72f4e..abfc5c91e7645 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java @@ -9,7 +9,6 @@ package org.opensearch.transport; import org.opensearch.tasks.ProtobufTask; -import org.opensearch.tasks.Task; /** * Handles transport requests @@ -18,5 +17,5 @@ */ public interface ProtobufTransportRequestHandler { - void messageReceived(T request, TransportChannel channel, ProtobufTask task) throws Exception; + void messageReceived(T request, ProtobufTransportChannel channel, ProtobufTask task) throws Exception; } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportResponseHandler.java new file mode 100644 index 0000000000000..6da728b512185 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportResponseHandler.java @@ -0,0 +1,57 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
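The interface this new file defines (continued below) carries a wrap(converter, reader) default method. Conceptually it is just response-type adaptation: a handler of R becomes a handler of Q by converting each response before delegating, while exception handling and executor choice pass through unchanged. Reduced to plain functions:

import java.util.function.Consumer;
import java.util.function.Function;

final class Handlers {
    // adapt a handler of R into a handler of Q: convert, then delegate
    static <Q, R> Consumer<Q> wrap(Consumer<R> handler, Function<Q, R> converter) {
        return response -> handler.accept(converter.apply(response));
    }

    public static void main(String[] args) {
        Consumer<Integer> intHandler = n -> System.out.println("got " + n);
        Consumer<String> stringHandler = wrap(intHandler, Integer::parseInt);
        stringHandler.accept("42"); // prints: got 42
    }
}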
+*/ + +package org.opensearch.transport; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.common.io.stream.ProtobufWriteable; + +import java.io.IOException; +import java.util.function.Function; + +/** + * Handles transport responses +* +* @opensearch.internal +*/ +public interface ProtobufTransportResponseHandler extends ProtobufWriteable.Reader { + + void handleResponse(T response); + + void handleException(ProtobufTransportException exp); + + String executor(); + + default ProtobufTransportResponseHandler wrap( + Function converter, + ProtobufWriteable.Reader reader + ) { + final ProtobufTransportResponseHandler self = this; + return new ProtobufTransportResponseHandler() { + @Override + public void handleResponse(Q response) { + self.handleResponse(converter.apply(response)); + } + + @Override + public void handleException(ProtobufTransportException exp) { + self.handleException(exp); + } + + @Override + public String executor() { + return self.executor(); + } + + @Override + public Q read(CodedInputStream in) throws IOException { + return reader.read(in); + } + }; + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java index 369419be1c8f6..9f5107009d977 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java @@ -8,39 +8,41 @@ package org.opensearch.transport; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionListenerResponseHandler; +import org.opensearch.action.ProtobufActionListenerResponseHandler; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.cluster.ClusterName; -import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.logging.Loggers; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.ProtobufBoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.node.NodeClosedException; +import 
org.opensearch.node.ProtobufNodeClosedException; import org.opensearch.node.ProtobufReportingService; -import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskManager; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.ProtobufTaskManager; import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.ThreadPool; +import org.opensearch.threadpool.ProtobufThreadPool; import java.io.IOException; import java.io.UncheckedIOException; @@ -68,8 +70,8 @@ public class ProtobufTransportService extends AbstractLifecycleComponent implements ProtobufReportingService, - TransportMessageListener, - TransportConnectionListener { + ProtobufTransportMessageListener, + ProtobufTransportConnectionListener { private static final Logger logger = LogManager.getLogger(ProtobufTransportService.class); public static final String DIRECT_RESPONSE_PROFILE = ".direct"; @@ -77,16 +79,16 @@ public class ProtobufTransportService extends AbstractLifecycleComponent private final AtomicBoolean handleIncomingRequests = new AtomicBoolean(); private final DelegatingTransportMessageListener messageListener = new DelegatingTransportMessageListener(); - protected final Transport transport; - protected final ConnectionManager connectionManager; - protected final ThreadPool threadPool; - protected final ClusterName clusterName; - protected final TaskManager taskManager; - private final TransportInterceptor.AsyncSender asyncSender; - private final Function localNodeFactory; + protected final ProtobufTransport transport; + protected final ProtobufConnectionManager connectionManager; + protected final ProtobufThreadPool threadPool; + protected final ProtobufClusterName clusterName; + protected final ProtobufTaskManager taskManager; + private final ProtobufTransportInterceptor.AsyncSender asyncSender; + private final Function localNodeFactory; private final boolean remoteClusterClient; - private final Transport.ResponseHandlers responseHandlers; - private final TransportInterceptor interceptor; + private final ProtobufTransport.ResponseHandlers responseHandlers; + private final ProtobufTransportInterceptor interceptor; // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they // do show up, we can print more descriptive information about them @@ -99,7 +101,7 @@ protected boolean removeEldestEntry(Map.Entry eldest) { } ); - public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() { + public static final ProtobufTransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new ProtobufTransportInterceptor() { }; // tracer log @@ -109,19 +111,19 @@ protected boolean removeEldestEntry(Map.Entry eldest) { volatile String[] tracerLogInclude; volatile String[] tracerLogExclude; - private final RemoteClusterService remoteClusterService; + private final ProtobufRemoteClusterService remoteClusterService; /** if set will call requests sent to this id to shortcut and executed locally */ - volatile DiscoveryNode localNode = null; - private final Transport.Connection localNodeConnection = new Transport.Connection() { + volatile ProtobufDiscoveryNode localNode = null; + private final ProtobufTransport.Connection localNodeConnection = new ProtobufTransport.Connection() { @Override - public DiscoveryNode getNode() { + public ProtobufDiscoveryNode getNode() { return localNode; } @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws TransportException { + 
public void sendRequest(long requestId, String action, ProtobufTransportRequest request, TransportRequestOptions options) + throws ProtobufTransportException { sendLocalRequest(requestId, action, request, options); } @@ -145,10 +147,10 @@ public void close() {} */ public ProtobufTransportService( Settings settings, - Transport transport, - ThreadPool threadPool, - TransportInterceptor transportInterceptor, - Function localNodeFactory, + ProtobufTransport transport, + ProtobufThreadPool threadPool, + ProtobufTransportInterceptor transportInterceptor, + Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders ) { @@ -160,34 +162,34 @@ public ProtobufTransportService( localNodeFactory, clusterSettings, taskHeaders, - new ClusterConnectionManager(settings, transport) + new ProtobufClusterConnectionManager(settings, transport) ); } public ProtobufTransportService( Settings settings, - Transport transport, - ThreadPool threadPool, - TransportInterceptor transportInterceptor, - Function localNodeFactory, + ProtobufTransport transport, + ProtobufThreadPool threadPool, + ProtobufTransportInterceptor transportInterceptor, + Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders, - ConnectionManager connectionManager + ProtobufConnectionManager connectionManager ) { this.transport = transport; transport.setSlowLogThreshold(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings)); this.threadPool = threadPool; this.localNodeFactory = localNodeFactory; this.connectionManager = connectionManager; - this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); + this.clusterName = ProtobufClusterName.CLUSTER_NAME_SETTING.get(settings); setTracerLogInclude(TransportSettings.TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.get(settings)); tracerLog = Loggers.getLogger(logger, ".tracer"); taskManager = createTaskManager(settings, clusterSettings, threadPool, taskHeaders); this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); - this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); - remoteClusterService = new RemoteClusterService(settings, this); + this.remoteClusterClient = ProtobufDiscoveryNode.isRemoteClusterClient(settings); + remoteClusterService = new ProtobufRemoteClusterService(settings, this); responseHandlers = transport.getResponseHandlers(); if (clusterSettings != null) { clusterSettings.addSettingsUpdateConsumer(TransportSettings.TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); @@ -199,7 +201,7 @@ public ProtobufTransportService( } registerRequestHandler( HANDSHAKE_ACTION_NAME, - ThreadPool.Names.SAME, + ProtobufThreadPool.Names.SAME, false, false, HandshakeRequest::new, @@ -207,28 +209,28 @@ public ProtobufTransportService( ); } - public RemoteClusterService getRemoteClusterService() { + public ProtobufRemoteClusterService getRemoteClusterService() { return remoteClusterService; } - public DiscoveryNode getLocalNode() { + public ProtobufDiscoveryNode getLocalNode() { return localNode; } - public TaskManager getTaskManager() { + public ProtobufTaskManager getTaskManager() { return taskManager; } - protected TaskManager createTaskManager( + protected ProtobufTaskManager createTaskManager( Settings settings, ClusterSettings clusterSettings, - ThreadPool threadPool, + ProtobufThreadPool threadPool, Set taskHeaders ) { if (clusterSettings != null) { - return 
TaskManager.createTaskManagerWithClusterSettings(settings, clusterSettings, threadPool, taskHeaders); + return ProtobufTaskManager.createTaskManagerWithClusterSettings(settings, clusterSettings, threadPool, taskHeaders); } else { - return new TaskManager(settings, threadPool, taskHeaders); + return new ProtobufTaskManager(settings, threadPool, taskHeaders); } } @@ -277,7 +279,7 @@ protected void doStop() { } finally { // in case the transport is not connected to our local node (thus cleaned on node disconnect) // make sure to clean any leftover on going handles - for (final Transport.ResponseContext holderToNotify : responseHandlers.prune(h -> true)) { + for (final ProtobufTransport.ResponseContext holderToNotify : responseHandlers.prune(h -> true)) { // callback that an exception happened, but on a different thread since we don't // want handlers to worry about stack overflows getExecutorService().execute(new AbstractRunnable() { @@ -306,10 +308,10 @@ public void onFailure(Exception e) { @Override public void doRun() { - TransportException ex = new SendRequestTransportException( + ProtobufTransportException ex = new ProtobufSendRequestTransportException( holderToNotify.connection().getNode(), holderToNotify.action(), - new NodeClosedException(localNode) + new ProtobufNodeClosedException(localNode) ); holderToNotify.handler().handleException(ex); } @@ -360,7 +362,7 @@ public List getDefaultSeedAddresses() { /** * Returns true iff the given node is already connected. */ - public boolean nodeConnected(DiscoveryNode node) { + public boolean nodeConnected(ProtobufDiscoveryNode node) { return isLocalNode(node) || connectionManager.nodeConnected(node); } @@ -369,13 +371,13 @@ public boolean nodeConnected(DiscoveryNode node) { * * @param node the node to connect to */ - public void connectToNode(DiscoveryNode node) throws ConnectTransportException { - connectToNode(node, (ConnectionProfile) null); + public void connectToNode(ProtobufDiscoveryNode node) throws ProtobufConnectTransportException { + connectToNode(node, (ProtobufConnectionProfile) null); } // We are skipping node validation for extensibility as extensionNode and opensearchNode(LocalNode) will have different ephemeral id's - public void connectToExtensionNode(final DiscoveryNode node) { - PlainActionFuture.get(fut -> connectToExtensionNode(node, (ConnectionProfile) null, ActionListener.map(fut, x -> null))); + public void connectToExtensionNode(final ProtobufDiscoveryNode node) { + PlainActionFuture.get(fut -> connectToExtensionNode(node, (ProtobufConnectionProfile) null, ActionListener.map(fut, x -> null))); } /** @@ -384,11 +386,11 @@ public void connectToExtensionNode(final DiscoveryNode node) { * @param node the node to connect to * @param connectionProfile the connection profile to use when connecting to this node */ - public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + public void connectToNode(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { PlainActionFuture.get(fut -> connectToNode(node, connectionProfile, ActionListener.map(fut, x -> null))); } - public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + public void connectToExtensionNode(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { PlainActionFuture.get(fut -> connectToExtensionNode(node, connectionProfile, ActionListener.map(fut, x -> null))); } @@ -399,11 +401,11 @@ public void connectToExtensionNode(final DiscoveryNode 
node, ConnectionProfile c * @param node the node to connect to * @param listener the action listener to notify */ - public void connectToNode(DiscoveryNode node, ActionListener listener) throws ConnectTransportException { + public void connectToNode(ProtobufDiscoveryNode node, ActionListener listener) throws ProtobufConnectTransportException { connectToNode(node, null, listener); } - public void connectToExtensionNode(DiscoveryNode node, ActionListener listener) throws ConnectTransportException { + public void connectToExtensionNode(ProtobufDiscoveryNode node, ActionListener listener) throws ProtobufConnectTransportException { connectToExtensionNode(node, null, listener); } @@ -415,7 +417,11 @@ public void connectToExtensionNode(DiscoveryNode node, ActionListener list * @param connectionProfile the connection profile to use when connecting to this node * @param listener the action listener to notify */ - public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener listener) { + public void connectToNode( + final ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ActionListener listener + ) { if (isLocalNode(node)) { listener.onResponse(null); return; @@ -423,7 +429,11 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection connectionManager.connectToNode(node, connectionProfile, connectionValidator(node), listener); } - public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener listener) { + public void connectToExtensionNode( + final ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ActionListener listener + ) { if (isLocalNode(node)) { listener.onResponse(null); return; @@ -431,14 +441,14 @@ public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile c connectionManager.connectToNode(node, connectionProfile, extensionConnectionValidator(node), listener); } - public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode node) { + public ProtobufConnectionManager.ConnectionValidator connectionValidator(ProtobufDiscoveryNode node) { return (newConnection, actualProfile, listener) -> { // We don't validate cluster names to allow for CCS connections. handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { - final DiscoveryNode remote = resp.discoveryNode; + final ProtobufDiscoveryNode remote = resp.discoveryNode; if (node.equals(remote) == false) { - throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); + throw new ProtobufConnectTransportException(node, "handshake failed. unexpected remote node " + remote); } return null; @@ -446,11 +456,11 @@ public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode n }; } - public ConnectionManager.ConnectionValidator extensionConnectionValidator(DiscoveryNode node) { + public ProtobufConnectionManager.ConnectionValidator extensionConnectionValidator(ProtobufDiscoveryNode node) { return (newConnection, actualProfile, listener) -> { // We don't validate cluster names to allow for CCS connections. 
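Both connectToNode variants above share the local-node short-circuit: connecting to yourself is a no-op that completes the listener immediately, and only remote nodes reach the connection manager. The control flow, abstracted away from the transport types:

import java.util.function.Consumer;

final class ConnectSketch {
    static void connect(String nodeId, String localNodeId, Consumer<Void> listener, Runnable connectRemotely) {
        if (nodeId.equals(localNodeId)) {
            listener.accept(null); // nothing to open for the local node
            return;
        }
        connectRemotely.run();     // otherwise hand off to the connection manager
    }
}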
handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { - final DiscoveryNode remote = resp.discoveryNode; + final ProtobufDiscoveryNode remote = resp.discoveryNode; logger.info("Connection validation was skipped"); return null; })); @@ -464,7 +474,7 @@ public ConnectionManager.ConnectionValidator extensionConnectionValidator(Discov * @param node the node to connect to * @param connectionProfile the connection profile to use */ - public Transport.Connection openConnection(final DiscoveryNode node, ConnectionProfile connectionProfile) { + public ProtobufTransport.Connection openConnection(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { return PlainActionFuture.get(fut -> openConnection(node, connectionProfile, fut)); } @@ -477,9 +487,9 @@ public Transport.Connection openConnection(final DiscoveryNode node, ConnectionP * @param listener the action listener to notify */ public void openConnection( - final DiscoveryNode node, - ConnectionProfile connectionProfile, - ActionListener listener + final ProtobufDiscoveryNode node, + ProtobufConnectionProfile connectionProfile, + ActionListener listener ) { if (isLocalNode(node)) { listener.onResponse(localNodeConnection); @@ -498,13 +508,13 @@ public void openConnection( * @param connection the connection to a specific node * @param handshakeTimeout handshake timeout * @param listener action listener to notify - * @throws ConnectTransportException if the connection failed + * @throws ProtobufConnectTransportException if the connection failed * @throws IllegalStateException if the handshake failed */ public void handshake( - final Transport.Connection connection, + final ProtobufTransport.Connection connection, final long handshakeTimeout, - final ActionListener listener + final ActionListener listener ) { handshake( connection, @@ -528,18 +538,18 @@ public void handshake( * @throws IllegalStateException if the handshake failed */ public void handshake( - final Transport.Connection connection, + final ProtobufTransport.Connection connection, final long handshakeTimeout, - Predicate clusterNamePredicate, + Predicate clusterNamePredicate, final ActionListener listener ) { - final DiscoveryNode node = connection.getNode(); + final ProtobufDiscoveryNode node = connection.getNode(); sendRequest( connection, HANDSHAKE_ACTION_NAME, HandshakeRequest.INSTANCE, TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(), - new ActionListenerResponseHandler<>(new ActionListener() { + new ProtobufActionListenerResponseHandler<>(new ActionListener() { @Override public void onResponse(HandshakeResponse response) { if (clusterNamePredicate.test(response.clusterName) == false) { @@ -574,11 +584,11 @@ public void onResponse(HandshakeResponse response) { public void onFailure(Exception e) { listener.onFailure(e); } - }, HandshakeResponse::new, ThreadPool.Names.GENERIC) + }, HandshakeResponse::new, ProtobufThreadPool.Names.GENERIC) ); } - public ConnectionManager getConnectionManager() { + public ProtobufConnectionManager getConnectionManager() { return connectionManager; } @@ -587,11 +597,11 @@ public ConnectionManager getConnectionManager() { * * @opensearch.internal */ - static class HandshakeRequest extends TransportRequest { + static class HandshakeRequest extends ProtobufTransportRequest { public static final HandshakeRequest INSTANCE = new HandshakeRequest(); - HandshakeRequest(StreamInput in) throws IOException { + HandshakeRequest(CodedInputStream in) 
throws IOException { super(in); } @@ -604,100 +614,102 @@ private HandshakeRequest() {} * * @opensearch.internal */ - public static class HandshakeResponse extends TransportResponse { - private final DiscoveryNode discoveryNode; - private final ClusterName clusterName; + public static class HandshakeResponse extends ProtobufTransportResponse { + private final ProtobufDiscoveryNode discoveryNode; + private final ProtobufClusterName clusterName; private final Version version; - public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) { + public HandshakeResponse(ProtobufDiscoveryNode discoveryNode, ProtobufClusterName clusterName, Version version) { this.discoveryNode = discoveryNode; this.version = version; this.clusterName = clusterName; } - public HandshakeResponse(StreamInput in) throws IOException { + public HandshakeResponse(CodedInputStream in) throws IOException { super(in); - discoveryNode = in.readOptionalWriteable(DiscoveryNode::new); - clusterName = new ClusterName(in); - version = Version.readVersion(in); + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + discoveryNode = protobufStreamInput.readOptionalWriteable(ProtobufDiscoveryNode::new); + clusterName = new ProtobufClusterName(in); + version = Version.readVersionProtobuf(in); } @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(discoveryNode); + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeOptionalWriteable(discoveryNode); clusterName.writeTo(out); - Version.writeVersion(version, out); + out.writeInt32NoTag(version.id); } - public DiscoveryNode getDiscoveryNode() { + public ProtobufDiscoveryNode getDiscoveryNode() { return discoveryNode; } - public ClusterName getClusterName() { + public ProtobufClusterName getClusterName() { return clusterName; } } - public void disconnectFromNode(DiscoveryNode node) { + public void disconnectFromNode(ProtobufDiscoveryNode node) { if (isLocalNode(node)) { return; } connectionManager.disconnectFromNode(node); } - public void addMessageListener(TransportMessageListener listener) { + public void addMessageListener(ProtobufTransportMessageListener listener) { messageListener.listeners.add(listener); } - public boolean removeMessageListener(TransportMessageListener listener) { + public boolean removeMessageListener(ProtobufTransportMessageListener listener) { return messageListener.listeners.remove(listener); } - public void addConnectionListener(TransportConnectionListener listener) { + public void addConnectionListener(ProtobufTransportConnectionListener listener) { connectionManager.addListener(listener); } - public void removeConnectionListener(TransportConnectionListener listener) { + public void removeConnectionListener(ProtobufTransportConnectionListener listener) { connectionManager.removeListener(listener); } - public TransportFuture submitRequest( - DiscoveryNode node, + public TransportFuture submitRequest( + ProtobufDiscoveryNode node, String action, - TransportRequest request, - TransportResponseHandler handler - ) throws TransportException { + ProtobufTransportRequest request, + ProtobufTransportResponseHandler handler + ) throws ProtobufTransportException { return submitRequest(node, action, request, TransportRequestOptions.EMPTY, handler); } - public TransportFuture submitRequest( - DiscoveryNode node, + public TransportFuture submitRequest( + 
ProtobufDiscoveryNode node, String action, - TransportRequest request, + ProtobufTransportRequest request, TransportRequestOptions options, - TransportResponseHandler handler - ) throws TransportException { - PlainTransportFuture futureHandler = new PlainTransportFuture<>(handler); + ProtobufTransportResponseHandler handler + ) throws ProtobufTransportException { + ProtobufPlainTransportFuture futureHandler = new ProtobufPlainTransportFuture<>(handler); try { - Transport.Connection connection = getConnection(node); + ProtobufTransport.Connection connection = getConnection(node); sendRequest(connection, action, request, options, futureHandler); - } catch (NodeNotConnectedException ex) { + } catch (ProtobufNodeNotConnectedException ex) { // the caller might not handle this so we invoke the handler futureHandler.handleException(ex); } return futureHandler; } - public void sendRequest( - final DiscoveryNode node, + public void sendRequest( + final ProtobufDiscoveryNode node, final String action, - final TransportRequest request, - final TransportResponseHandler handler + final ProtobufTransportRequest request, + final ProtobufTransportResponseHandler handler ) { - final Transport.Connection connection; + final ProtobufTransport.Connection connection; try { connection = getConnection(node); - } catch (final NodeNotConnectedException ex) { + } catch (final ProtobufNodeNotConnectedException ex) { // the caller might not handle this so we invoke the handler handler.handleException(ex); return; @@ -705,17 +717,17 @@ public void sendRequest( sendRequest(connection, action, request, TransportRequestOptions.EMPTY, handler); } - public final void sendRequest( - final DiscoveryNode node, + public final void sendRequest( + final ProtobufDiscoveryNode node, final String action, - final TransportRequest request, + final ProtobufTransportRequest request, final TransportRequestOptions options, - TransportResponseHandler handler + ProtobufTransportResponseHandler handler ) { - final Transport.Connection connection; + final ProtobufTransport.Connection connection; try { connection = getConnection(node); - } catch (final NodeNotConnectedException ex) { + } catch (final ProtobufNodeNotConnectedException ex) { // the caller might not handle this so we invoke the handler handler.handleException(ex); return; @@ -733,20 +745,20 @@ public final void sendRequest( * @param handler the response handler * @param the type of the transport response */ - public final void sendRequest( - final Transport.Connection connection, + public final void sendRequest( + final ProtobufTransport.Connection connection, final String action, - final TransportRequest request, + final ProtobufTransportRequest request, final TransportRequestOptions options, - final TransportResponseHandler handler + final ProtobufTransportResponseHandler handler ) { try { logger.debug("Action: " + action); - final TransportResponseHandler delegate; + final ProtobufTransportResponseHandler delegate; if (request.getParentTask().isSet()) { // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. 
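The sendRequest path that follows wraps the caller's handler whenever the request carries a parent task: the child-node registration must be released exactly once, whether the request succeeds or fails, before the original handler runs. The wrapper pattern in isolation, with toy functional types instead of ProtobufTransportResponseHandler:

import java.util.function.Consumer;

final class UnregisteringHandler<T> {
    private final Runnable unregisterChildNode;
    private final Consumer<T> onResponse;
    private final Consumer<Exception> onFailure;

    UnregisteringHandler(Runnable unregisterChildNode, Consumer<T> onResponse, Consumer<Exception> onFailure) {
        this.unregisterChildNode = unregisterChildNode;
        this.onResponse = onResponse;
        this.onFailure = onFailure;
    }

    void handleResponse(T response) {
        unregisterChildNode.run();  // release the registration first
        onResponse.accept(response);
    }

    void handleException(Exception e) {
        unregisterChildNode.run();  // ... on the failure path too
        onFailure.accept(e);
    }
}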
final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode()); - delegate = new TransportResponseHandler() { + delegate = new ProtobufTransportResponseHandler() { @Override public void handleResponse(T response) { unregisterChildNode.close(); @@ -754,7 +766,7 @@ public void handleResponse(T response) { } @Override - public void handleException(TransportException exp) { + public void handleException(ProtobufTransportException exp) { unregisterChildNode.close(); handler.handleException(exp); } @@ -765,7 +777,7 @@ public String executor() { } @Override - public T read(StreamInput in) throws IOException { + public T read(CodedInputStream in) throws IOException { return handler.read(in); } @@ -780,11 +792,11 @@ public String toString() { asyncSender.sendRequest(connection, action, request, options, delegate); } catch (final Exception ex) { // the caller might not handle this so we invoke the handler - final TransportException te; - if (ex instanceof TransportException) { - te = (TransportException) ex; + final ProtobufTransportException te; + if (ex instanceof ProtobufTransportException) { + te = (ProtobufTransportException) ex; } else { - te = new TransportException("failure to send", ex); + te = new ProtobufTransportException("failure to send", ex); } handler.handleException(te); } @@ -792,9 +804,9 @@ public String toString() { /** * Returns either a real transport connection or a local node connection if we are using the local node optimization. - * @throws NodeNotConnectedException if the given node is not connected + * @throws ProtobufNodeNotConnectedException if the given node is not connected */ - public Transport.Connection getConnection(DiscoveryNode node) { + public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node) { if (isLocalNode(node)) { return localNodeConnection; } else { @@ -802,18 +814,18 @@ public Transport.Connection getConnection(DiscoveryNode node) { } } - public final void sendChildRequest( - final DiscoveryNode node, + public final void sendChildRequest( + final ProtobufDiscoveryNode node, final String action, - final TransportRequest request, - final Task parentTask, + final ProtobufTransportRequest request, + final ProtobufTask parentTask, final TransportRequestOptions options, - final TransportResponseHandler handler + final ProtobufTransportResponseHandler handler ) { - final Transport.Connection connection; + final ProtobufTransport.Connection connection; try { connection = getConnection(node); - } catch (final NodeNotConnectedException ex) { + } catch (final ProtobufNodeNotConnectedException ex) { // the caller might not handle this so we invoke the handler handler.handleException(ex); return; @@ -821,44 +833,44 @@ public final void sendChildRequest( sendChildRequest(connection, action, request, parentTask, options, handler); } - public void sendChildRequest( - final Transport.Connection connection, + public void sendChildRequest( + final ProtobufTransport.Connection connection, final String action, - final TransportRequest request, - final Task parentTask, - final TransportResponseHandler handler + final ProtobufTransportRequest request, + final ProtobufTask parentTask, + final ProtobufTransportResponseHandler handler ) { sendChildRequest(connection, action, request, parentTask, TransportRequestOptions.EMPTY, handler); } - public void sendChildRequest( - final Transport.Connection connection, + public void sendChildRequest( + final ProtobufTransport.Connection connection, final 
@@ -792,9 +804,9 @@ public String toString() {

     /**
      * Returns either a real transport connection or a local node connection if we are using the local node optimization.
-     * @throws NodeNotConnectedException if the given node is not connected
+     * @throws ProtobufNodeNotConnectedException if the given node is not connected
      */
-    public Transport.Connection getConnection(DiscoveryNode node) {
+    public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node) {
         if (isLocalNode(node)) {
             return localNodeConnection;
         } else {
@@ -802,18 +814,18 @@ public Transport.Connection getConnection(DiscoveryNode node) {
         }
     }

-    public final <T extends TransportResponse> void sendChildRequest(
-        final DiscoveryNode node,
+    public final <T extends ProtobufTransportResponse> void sendChildRequest(
+        final ProtobufDiscoveryNode node,
         final String action,
-        final TransportRequest request,
-        final Task parentTask,
+        final ProtobufTransportRequest request,
+        final ProtobufTask parentTask,
         final TransportRequestOptions options,
-        final TransportResponseHandler<T> handler
+        final ProtobufTransportResponseHandler<T> handler
     ) {
-        final Transport.Connection connection;
+        final ProtobufTransport.Connection connection;
         try {
             connection = getConnection(node);
-        } catch (final NodeNotConnectedException ex) {
+        } catch (final ProtobufNodeNotConnectedException ex) {
             // the caller might not handle this so we invoke the handler
             handler.handleException(ex);
             return;
@@ -821,44 +833,44 @@ public final <T extends TransportResponse> void sendChildRequest(
         sendChildRequest(connection, action, request, parentTask, options, handler);
     }

-    public <T extends TransportResponse> void sendChildRequest(
-        final Transport.Connection connection,
+    public <T extends ProtobufTransportResponse> void sendChildRequest(
+        final ProtobufTransport.Connection connection,
         final String action,
-        final TransportRequest request,
-        final Task parentTask,
-        final TransportResponseHandler<T> handler
+        final ProtobufTransportRequest request,
+        final ProtobufTask parentTask,
+        final ProtobufTransportResponseHandler<T> handler
     ) {
         sendChildRequest(connection, action, request, parentTask, TransportRequestOptions.EMPTY, handler);
     }

-    public <T extends TransportResponse> void sendChildRequest(
-        final Transport.Connection connection,
+    public <T extends ProtobufTransportResponse> void sendChildRequest(
+        final ProtobufTransport.Connection connection,
         final String action,
-        final TransportRequest request,
-        final Task parentTask,
+        final ProtobufTransportRequest request,
+        final ProtobufTask parentTask,
         final TransportRequestOptions options,
-        final TransportResponseHandler<T> handler
+        final ProtobufTransportResponseHandler<T> handler
     ) {
         request.setParentTask(localNode.getId(), parentTask.getId());
         sendRequest(connection, action, request, options, handler);
     }

-    private <T extends TransportResponse> void sendRequestInternal(
-        final Transport.Connection connection,
+    private <T extends ProtobufTransportResponse> void sendRequestInternal(
+        final ProtobufTransport.Connection connection,
         final String action,
-        final TransportRequest request,
+        final ProtobufTransportRequest request,
         final TransportRequestOptions options,
-        TransportResponseHandler<T> handler
+        ProtobufTransportResponseHandler<T> handler
     ) {
         if (connection == null) {
             throw new IllegalStateException("can't send request to a null connection");
         }
-        DiscoveryNode node = connection.getNode();
+        ProtobufDiscoveryNode node = connection.getNode();
         Supplier<ThreadContext.StoredContext> storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true);
         ContextRestoreResponseHandler<T> responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler);
        // TODO we can probably fold this entire request ID dance into connection.sendRequest but it will be a bigger refactoring
-        final long requestId = responseHandlers.add(new Transport.ResponseContext<>(responseHandler, connection, action));
+        final long requestId = responseHandlers.add(new ProtobufTransport.ResponseContext<>(responseHandler, connection, action));
         final TimeoutHandler timeoutHandler;
         if (options.timeout() != null) {
             timeoutHandler = new TimeoutHandler(requestId, connection.getNode(), action);
@@ -872,7 +884,7 @@ private <T extends TransportResponse> void sendRequestInternal(
                 * If we are not started the exception handling will remove the request holder again and call the handler to notify the
                 * caller. It will only notify if toStop hasn't done the work yet.
                 */
-                throw new NodeClosedException(localNode);
+                throw new ProtobufNodeClosedException(localNode);
             }
             if (timeoutHandler != null) {
                 assert options.timeout() != null;
@@ -882,7 +894,9 @@ private <T extends TransportResponse> void sendRequestInternal(
         } catch (final Exception e) {
             // usually happens either because we failed to connect to the node
             // or because we failed serializing the message
-            final Transport.ResponseContext contextToNotify = responseHandlers.remove(requestId);
+            final ProtobufTransport.ResponseContext<? extends ProtobufTransportResponse> contextToNotify = responseHandlers.remove(
+                requestId
+            );
             // If holderToNotify == null then handler has already been taken care of.
             if (contextToNotify != null) {
                 if (timeoutHandler != null) {
@@ -891,8 +905,12 @@ private <T extends TransportResponse> void sendRequestInternal(
                 // callback that an exception happened, but on a different thread since we don't
                 // want handlers to worry about stack overflows. In the special case of running into a closing node we run on the current
                 // thread on a best effort basis though.
-                final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e);
-                final String executor = lifecycle.stoppedOrClosed() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC;
+                final ProtobufSendRequestTransportException sendRequestException = new ProtobufSendRequestTransportException(
+                    node,
+                    action,
+                    e
+                );
+                final String executor = lifecycle.stoppedOrClosed() ? ProtobufThreadPool.Names.SAME : ProtobufThreadPool.Names.GENERIC;
                 threadPool.executor(executor).execute(new AbstractRunnable() {
                     @Override
                     public void onRejection(Exception e) {
@@ -928,17 +946,22 @@ protected void doRun() throws Exception {
             }
         }
     }

-    private void sendLocalRequest(long requestId, final String action, final TransportRequest request, TransportRequestOptions options) {
+    private void sendLocalRequest(
+        long requestId,
+        final String action,
+        final ProtobufTransportRequest request,
+        TransportRequestOptions options
+    ) {
         final DirectResponseChannel channel = new DirectResponseChannel(localNode, action, requestId, this, threadPool);
         try {
             onRequestSent(localNode, requestId, action, request, options);
             onRequestReceived(requestId, action);
-            final RequestHandlerRegistry reg = getRequestHandler(action);
+            final ProtobufRequestHandlerRegistry reg = getRequestHandler(action);
             if (reg == null) {
                 throw new ActionNotFoundTransportException("Action [" + action + "] not found");
             }
             final String executor = reg.getExecutor();
-            if (ThreadPool.Names.SAME.equals(executor)) {
+            if (ProtobufThreadPool.Names.SAME.equals(executor)) {
                 // noinspection unchecked
                 reg.processMessageReceived(request, channel);
             } else {
@@ -1000,7 +1023,7 @@ public static boolean shouldTraceAction(String action, String[] include, String[
         return true;
     }

-    public TransportAddress[] addressesFromString(String address) throws UnknownHostException {
+    public ProtobufTransportAddress[] addressesFromString(String address) throws UnknownHostException {
         return transport.addressesFromString(address);
     }

@@ -1027,7 +1050,9 @@ private void validateActionName(String actionName) {
         // TODO we should make this a hard validation and throw an exception but we need a good way to add backwards layer
         // for it. Maybe start with a deprecation layer
         if (isValidActionName(actionName) == false) {
-            logger.warn("invalid action name [" + actionName + "] must start with one of: " + ProtobufTransportService.VALID_ACTION_PREFIXES);
+            logger.warn(
+                "invalid action name [" + actionName + "] must start with one of: " + ProtobufTransportService.VALID_ACTION_PREFIXES
+            );
         }
     }
@@ -1053,15 +1078,15 @@ public static boolean isValidActionName(String actionName) {
      * @param executor The executor the request handling will be executed on
      * @param handler The handler itself that implements the request handling
      */
-    public <Request extends TransportRequest> void registerRequestHandler(
+    public <Request extends ProtobufTransportRequest> void registerRequestHandler(
         String action,
         String executor,
-        Writeable.Reader<Request> requestReader,
-        TransportRequestHandler<Request> handler
+        ProtobufWriteable.Reader<Request> requestReader,
+        ProtobufTransportRequestHandler<Request> handler
     ) {
         validateActionName(action);
         handler = interceptor.interceptHandler(action, executor, false, handler);
-        RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
+        ProtobufRequestHandlerRegistry<Request> reg = new ProtobufRequestHandlerRegistry<>(
             action,
             requestReader,
             taskManager,
@@ -1083,17 +1108,17 @@ public <Request extends TransportRequest> void registerRequestHandler(
      * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached.
      * @param handler The handler itself that implements the request handling
      */
-    public <Request extends TransportRequest> void registerRequestHandler(
+    public <Request extends ProtobufTransportRequest> void registerRequestHandler(
         String action,
         String executor,
         boolean forceExecution,
         boolean canTripCircuitBreaker,
-        Writeable.Reader<Request> requestReader,
-        TransportRequestHandler<Request> handler
+        ProtobufWriteable.Reader<Request> requestReader,
+        ProtobufTransportRequestHandler<Request> handler
     ) {
         validateActionName(action);
         handler = interceptor.interceptHandler(action, executor, forceExecution, handler);
-        RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
+        ProtobufRequestHandlerRegistry<Request> reg = new ProtobufRequestHandlerRegistry<>(
             action,
             requestReader,
             taskManager,
@@ -1106,7 +1131,7 @@ public <Request extends TransportRequest> void registerRequestHandler(
     }
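Registration pairs an action name with a ProtobufWriteable.Reader and a handler. Under the same assumptions as the earlier sketch (hypothetical MyProtobufRequest/MyProtobufResponse types, and a handler functional shape mirroring TransportRequestHandler#messageReceived), a registration could look like:

    // Sketch only; the action name must carry a valid prefix (see validateActionName).
    transportService.registerRequestHandler(
        "internal:example/protobuf",
        ThreadPool.Names.GENERIC,
        MyProtobufRequest::new,               // ProtobufWriteable.Reader: deserializes from CodedInputStream
        (request, channel, task) -> channel.sendResponse(new MyProtobufResponse())
    );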
     /**
-     * called by the {@link Transport} implementation when an incoming request arrives but before
+     * called by the {@link ProtobufTransport} implementation when an incoming request arrives but before
      * any parsing of it has happened (with the exception of the requestId and action)
      */
     @Override
@@ -1120,13 +1145,13 @@ public void onRequestReceived(long requestId, String action) {
         messageListener.onRequestReceived(requestId, action);
     }

-    /** called by the {@link Transport} implementation once a request has been sent */
+    /** called by the {@link ProtobufTransport} implementation once a request has been sent */
     @Override
     public void onRequestSent(
-        DiscoveryNode node,
+        ProtobufDiscoveryNode node,
         long requestId,
         String action,
-        TransportRequest request,
+        ProtobufTransportRequest request,
         TransportRequestOptions options
     ) {
         if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) {
@@ -1136,7 +1161,7 @@ public void onRequestSent(
     }

     @Override
-    public void onResponseReceived(long requestId, Transport.ResponseContext holder) {
+    public void onResponseReceived(long requestId, ProtobufTransport.ResponseContext holder) {
         if (holder == null) {
             checkForTimeout(requestId);
         } else if (tracerLog.isTraceEnabled() && shouldTraceAction(holder.action())) {
@@ -1145,16 +1170,16 @@ public void onResponseReceived(long requestId, Transport.ResponseContext holder)
         messageListener.onResponseReceived(requestId, holder);
     }

-    /** called by the {@link Transport} implementation once a response was sent to calling node */
+    /** called by the {@link ProtobufTransport} implementation once a response was sent to calling node */
     @Override
-    public void onResponseSent(long requestId, String action, TransportResponse response) {
+    public void onResponseSent(long requestId, String action, ProtobufTransportResponse response) {
         if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) {
             tracerLog.trace("[{}][{}] sent response", requestId, action);
         }
         messageListener.onResponseSent(requestId, action, response);
     }

-    /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */
+    /** called by the {@link ProtobufTransport} implementation after an exception was sent as a response to an incoming request */
     @Override
     public void onResponseSent(long requestId, String action, Exception e) {
         if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) {
@@ -1163,13 +1188,13 @@ public void onResponseSent(long requestId, String action, Exception e) {
         messageListener.onResponseSent(requestId, action, e);
     }

-    public RequestHandlerRegistry getRequestHandler(String action) {
+    public ProtobufRequestHandlerRegistry getRequestHandler(String action) {
         return transport.getRequestHandlers().getHandler(action);
     }

     private void checkForTimeout(long requestId) {
         // let's see if it's in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished
-        final DiscoveryNode sourceNode;
+        final ProtobufDiscoveryNode sourceNode;
         final String action;
         assert responseHandlers.contains(requestId) == false;
         TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId);
@@ -1187,7 +1212,7 @@ private void checkForTimeout(long requestId) {
             action = timeoutInfoHolder.action();
             sourceNode = timeoutInfoHolder.node();
         } else {
-            logger.warn("Transport response handler not found of id [{}]", requestId);
+            logger.warn("ProtobufTransport response handler not found of id [{}]", requestId);
             action = null;
             sourceNode = null;
         }
@@ -1204,9 +1229,9 @@ private void checkForTimeout(long requestId) {
     }

     @Override
-    public void onConnectionClosed(Transport.Connection connection) {
+    public void onConnectionClosed(ProtobufTransport.Connection connection) {
         try {
-            List<Transport.ResponseContext<? extends TransportResponse>> pruned = responseHandlers.prune(
+            List<ProtobufTransport.ResponseContext<? extends ProtobufTransportResponse>> pruned = responseHandlers.prune(
                 h -> h.connection().getCacheKey().equals(connection.getCacheKey())
             );
             // callback that an exception happened, but on a different thread since we don't
@@ -1214,9 +1239,9 @@ public void onConnectionClosed(Transport.Connection connection) {
             getExecutorService().execute(new Runnable() {
                 @Override
                 public void run() {
-                    for (Transport.ResponseContext holderToNotify : pruned) {
+                    for (ProtobufTransport.ResponseContext holderToNotify : pruned) {
                         holderToNotify.handler()
-                            .handleException(new NodeDisconnectedException(connection.getNode(), holderToNotify.action()));
+                            .handleException(new ProtobufNodeDisconnectedException(connection.getNode(), holderToNotify.action()));
                     }
                 }
@@ -1235,10 +1260,10 @@ final class TimeoutHandler implements Runnable {

         private final long requestId;
         private final long sentTime = threadPool.relativeTimeInMillis();
         private final String action;
-        private final DiscoveryNode node;
+        private final ProtobufDiscoveryNode node;
         volatile Scheduler.Cancellable cancellable;

-        TimeoutHandler(long requestId, DiscoveryNode node, String action) {
+        TimeoutHandler(long requestId, ProtobufDiscoveryNode node, String action) {
             this.requestId = requestId;
             this.node = node;
             this.action = action;
@@ -1250,13 +1275,13 @@ public void run() {
                 long timeoutTime = threadPool.relativeTimeInMillis();
                 timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(node, action, sentTime, timeoutTime));
                 // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id
-                final Transport.ResponseContext holder = responseHandlers.remove(requestId);
+                final ProtobufTransport.ResponseContext holder = responseHandlers.remove(requestId);
                 if (holder != null) {
                     assert holder.action().equals(action);
                     assert holder.connection().getNode().equals(node);
                     holder.handler()
                         .handleException(
-                            new ReceiveTimeoutTransportException(
+                            new ProtobufReceiveTimeoutTransportException(
                                 holder.connection().getNode(),
                                 holder.action(),
                                 "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]"
@@ -1288,7 +1313,7 @@ public String toString() {
         }

         private void scheduleTimeout(TimeValue timeout) {
-            this.cancellable = threadPool.schedule(this, timeout, ThreadPool.Names.GENERIC);
+            this.cancellable = threadPool.schedule(this, timeout, ProtobufThreadPool.Names.GENERIC);
         }
     }

@@ -1299,19 +1324,19 @@ private void scheduleTimeout(TimeValue timeout) {
     */
    static class TimeoutInfoHolder {

-        private final DiscoveryNode node;
+        private final ProtobufDiscoveryNode node;
         private final String action;
         private final long sentTime;
         private final long timeoutTime;

-        TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) {
+        TimeoutInfoHolder(ProtobufDiscoveryNode node, String action, long sentTime, long timeoutTime) {
             this.node = node;
             this.action = action;
             this.sentTime = sentTime;
             this.timeoutTime = timeoutTime;
         }

-        public DiscoveryNode node() {
+        public ProtobufDiscoveryNode node() {
             return node;
         }
@@ -1332,19 +1357,24 @@ public long timeoutTime() {
     * This handler wrapper ensures that the response thread executes with the correct thread context. Before any of the handle methods
     * are invoked we restore the context.
     */
-    public static final class ContextRestoreResponseHandler<T> implements TransportResponseHandler<T> {
+    public static final class ContextRestoreResponseHandler<T extends ProtobufTransportResponse>
+        implements
+        ProtobufTransportResponseHandler<T> {

-        private final TransportResponseHandler<T> delegate;
+        private final ProtobufTransportResponseHandler<T> delegate;
         private final Supplier<ThreadContext.StoredContext> contextSupplier;
         private volatile TimeoutHandler handler;

-        public ContextRestoreResponseHandler(Supplier<ThreadContext.StoredContext> contextSupplier, TransportResponseHandler<T> delegate) {
+        public ContextRestoreResponseHandler(
+            Supplier<ThreadContext.StoredContext> contextSupplier,
+            ProtobufTransportResponseHandler<T> delegate
+        ) {
             this.delegate = delegate;
             this.contextSupplier = contextSupplier;
         }

         @Override
-        public T read(StreamInput in) throws IOException {
+        public T read(CodedInputStream in) throws IOException {
             return delegate.read(in);
         }
@@ -1359,7 +1389,7 @@ public void handleResponse(T response) {
         }

         @Override
-        public void handleException(TransportException exp) {
+        public void handleException(ProtobufTransportException exp) {
             if (handler != null) {
                 handler.cancel();
             }
@@ -1389,14 +1419,20 @@ void setTimeoutHandler(TimeoutHandler handler) {
     *
     * @opensearch.internal
     */
-    static class DirectResponseChannel implements TransportChannel {
-        final DiscoveryNode localNode;
+    static class DirectResponseChannel implements ProtobufTransportChannel {
+        final ProtobufDiscoveryNode localNode;
         private final String action;
         private final long requestId;
         final ProtobufTransportService service;
-        final ThreadPool threadPool;
+        final ProtobufThreadPool threadPool;

-        DirectResponseChannel(DiscoveryNode localNode, String action, long requestId, ProtobufTransportService service, ThreadPool threadPool) {
+        DirectResponseChannel(
+            ProtobufDiscoveryNode localNode,
+            String action,
+            long requestId,
+            ProtobufTransportService service,
+            ProtobufThreadPool threadPool
+        ) {
             this.localNode = localNode;
             this.action = action;
             this.requestId = requestId;
@@ -1410,13 +1446,13 @@ public String getProfileName() {
         }

         @Override
-        public void sendResponse(TransportResponse response) throws IOException {
+        public void sendResponse(ProtobufTransportResponse response) throws IOException {
             service.onResponseSent(requestId, action, response);
-            final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service);
+            final ProtobufTransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service);
             // ignore if its null, the service logs it
             if (handler != null) {
                 final String executor = handler.executor();
-                if (ThreadPool.Names.SAME.equals(executor)) {
+                if (ProtobufThreadPool.Names.SAME.equals(executor)) {
                     processResponse(handler, response);
                 } else {
                     threadPool.executor(executor).execute(new Runnable() {
@@ -1435,7 +1471,7 @@ public String toString() {
         }

         @SuppressWarnings("unchecked")
-        protected void processResponse(TransportResponseHandler handler, TransportResponse response) {
+        protected void processResponse(ProtobufTransportResponseHandler handler, ProtobufTransportResponse response) {
             try {
                 handler.handleResponse(response);
             } catch (Exception e) {
@@ -1446,12 +1482,12 @@ protected void processResponse(TransportResponseHandler handler, TransportRespon
         @Override
         public void sendResponse(Exception exception) throws IOException {
             service.onResponseSent(requestId, action, exception);
-            final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service);
+            final ProtobufTransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service);
             // ignore if its null, the service logs it
             if (handler != null) {
-                final RemoteTransportException rtx = wrapInRemote(exception);
+                final ProtobufRemoteTransportException rtx = wrapInRemote(exception);
                 final String executor = handler.executor();
-                if (ThreadPool.Names.SAME.equals(executor)) {
+                if (ProtobufThreadPool.Names.SAME.equals(executor)) {
                     processException(handler, rtx);
                 } else {
                     threadPool.executor(handler.executor()).execute(new Runnable() {
@@ -1469,14 +1505,14 @@ public String toString() {
             }
         }

-        protected RemoteTransportException wrapInRemote(Exception e) {
-            if (e instanceof RemoteTransportException) {
-                return (RemoteTransportException) e;
+        protected ProtobufRemoteTransportException wrapInRemote(Exception e) {
+            if (e instanceof ProtobufRemoteTransportException) {
+                return (ProtobufRemoteTransportException) e;
             }
-            return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e);
+            return new ProtobufRemoteTransportException(localNode.getName(), localNode.getAddress(), action, e);
         }

-        protected void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) {
+        protected void processException(final ProtobufTransportResponseHandler handler, final ProtobufRemoteTransportException rtx) {
             try {
                 handler.handleException(rtx);
             } catch (Exception e) {
@@ -1501,55 +1537,55 @@ public Version getVersion() {
     /**
     * Returns the internal thread pool
    */
-    public ThreadPool getThreadPool() {
+    public ProtobufThreadPool getThreadPool() {
         return threadPool;
     }

-    private boolean isLocalNode(DiscoveryNode discoveryNode) {
+    private boolean isLocalNode(ProtobufDiscoveryNode discoveryNode) {
         return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode);
     }

-    private static final class DelegatingTransportMessageListener implements TransportMessageListener {
+    private static final class DelegatingTransportMessageListener implements ProtobufTransportMessageListener {

-        private final List<TransportMessageListener> listeners = new CopyOnWriteArrayList<>();
+        private final List<ProtobufTransportMessageListener> listeners = new CopyOnWriteArrayList<>();

         @Override
         public void onRequestReceived(long requestId, String action) {
-            for (TransportMessageListener listener : listeners) {
+            for (ProtobufTransportMessageListener listener : listeners) {
                 listener.onRequestReceived(requestId, action);
             }
         }

         @Override
-        public void onResponseSent(long requestId, String action, TransportResponse response) {
-            for (TransportMessageListener listener : listeners) {
+        public void onResponseSent(long requestId, String action, ProtobufTransportResponse response) {
+            for (ProtobufTransportMessageListener listener : listeners) {
                 listener.onResponseSent(requestId, action, response);
             }
         }

         @Override
         public void onResponseSent(long requestId, String action, Exception error) {
-            for (TransportMessageListener listener : listeners) {
+            for (ProtobufTransportMessageListener listener : listeners) {
                 listener.onResponseSent(requestId, action, error);
             }
         }

         @Override
         public void onRequestSent(
-            DiscoveryNode node,
+            ProtobufDiscoveryNode node,
             long requestId,
             String action,
-            TransportRequest request,
+            ProtobufTransportRequest request,
             TransportRequestOptions finalOptions
         ) {
-            for (TransportMessageListener listener : listeners) {
+            for (ProtobufTransportMessageListener listener : listeners) {
                 listener.onRequestSent(node, requestId, action, request, finalOptions);
             }
         }

         @Override
-        public void onResponseReceived(long requestId, Transport.ResponseContext holder) {
-            for (TransportMessageListener listener : listeners) {
+        public void onResponseReceived(long requestId, ProtobufTransport.ResponseContext holder) {
+            for (ProtobufTransportMessageListener listener : listeners) {
                 listener.onResponseReceived(requestId, holder);
             }
         }
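Putting the two halves together, a caller on the sending node goes through one of the sendRequest overloads above; a minimal invocation, reusing the hypothetical types from the earlier sketches, might be:

    transportService.sendRequest(
        node,                                 // ProtobufDiscoveryNode, resolved to a connection via getConnection()
        "internal:example/protobuf",
        new MyProtobufRequest(),
        TransportRequestOptions.EMPTY,        // no timeout, so no TimeoutHandler is scheduled
        handler                               // the ProtobufTransportResponseHandler sketched earlier
    );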
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportStats.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportStats.java
new file mode 100644
index 0000000000000..963bf4665af8b
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportStats.java
@@ -0,0 +1,126 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Stats for transport activity
+*
+* @opensearch.internal
+*/
+public class ProtobufTransportStats implements ProtobufWriteable, ToXContentFragment {
+
+    private final long serverOpen;
+    private final long totalOutboundConnections;
+    private final long rxCount;
+    private final long rxSize;
+    private final long txCount;
+    private final long txSize;
+
+    public ProtobufTransportStats(long serverOpen, long totalOutboundConnections, long rxCount, long rxSize, long txCount, long txSize) {
+        this.serverOpen = serverOpen;
+        this.totalOutboundConnections = totalOutboundConnections;
+        this.rxCount = rxCount;
+        this.rxSize = rxSize;
+        this.txCount = txCount;
+        this.txSize = txSize;
+    }
+
+    public ProtobufTransportStats(CodedInputStream in) throws IOException {
+        serverOpen = in.readInt64();
+        totalOutboundConnections = in.readInt64();
+        rxCount = in.readInt64();
+        rxSize = in.readInt64();
+        txCount = in.readInt64();
+        txSize = in.readInt64();
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        out.writeInt64NoTag(serverOpen);
+        out.writeInt64NoTag(totalOutboundConnections);
+        out.writeInt64NoTag(rxCount);
+        out.writeInt64NoTag(rxSize);
+        out.writeInt64NoTag(txCount);
+        out.writeInt64NoTag(txSize);
+    }
+
+    public long serverOpen() {
+        return this.serverOpen;
+    }
+
+    public long getServerOpen() {
+        return serverOpen();
+    }
+
+    public long rxCount() {
+        return rxCount;
+    }
+
+    public long getRxCount() {
+        return rxCount();
+    }
+
+    public ByteSizeValue rxSize() {
+        return new ByteSizeValue(rxSize);
+    }
+
+    public ByteSizeValue getRxSize() {
+        return rxSize();
+    }
+
+    public long txCount() {
+        return txCount;
+    }
+
+    public long getTxCount() {
+        return txCount();
+    }
+
+    public ByteSizeValue txSize() {
+        return new ByteSizeValue(txSize);
+    }
+
+    public ByteSizeValue getTxSize() {
+        return txSize();
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(Fields.TRANSPORT);
+        builder.field(Fields.SERVER_OPEN, serverOpen);
+        builder.field(Fields.TOTAL_OUTBOUND_CONNECTIONS, totalOutboundConnections);
+        builder.field(Fields.RX_COUNT, rxCount);
+        builder.humanReadableField(Fields.RX_SIZE_IN_BYTES, Fields.RX_SIZE, new ByteSizeValue(rxSize));
+        builder.field(Fields.TX_COUNT, txCount);
+        builder.humanReadableField(Fields.TX_SIZE_IN_BYTES, Fields.TX_SIZE, new ByteSizeValue(txSize));
+        builder.endObject();
+        return builder;
+    }
+
+    static final class Fields {
+        static final String TRANSPORT = "transport";
+        static final String SERVER_OPEN = "server_open";
+        static final String TOTAL_OUTBOUND_CONNECTIONS = "total_outbound_connections";
+        static final String RX_COUNT = "rx_count";
+        static final String RX_SIZE = "rx_size";
+        static final String RX_SIZE_IN_BYTES = "rx_size_in_bytes";
+        static final String TX_COUNT = "tx_count";
+        static final String TX_SIZE = "tx_size";
+        static final String TX_SIZE_IN_BYTES = "tx_size_in_bytes";
+    }
+}
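Because writeTo emits untagged varints and the CodedInputStream constructor reads them back in the same order, the class round-trips without any schema on the wire. A quick sketch using only standard protobuf-java APIs, with arbitrary values:

    import com.google.protobuf.CodedInputStream;
    import com.google.protobuf.CodedOutputStream;
    import java.io.ByteArrayOutputStream;

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    CodedOutputStream out = CodedOutputStream.newInstance(bytes);
    new ProtobufTransportStats(1, 2, 3, 4096, 5, 8192).writeTo(out);
    out.flush();                              // CodedOutputStream buffers internally

    ProtobufTransportStats copy = new ProtobufTransportStats(CodedInputStream.newInstance(bytes.toByteArray()));
    assert copy.getRxCount() == 3 && copy.getTxSize().getBytes() == 8192;

Note that the two constructors must keep their field order in lockstep: since nothing on the wire identifies a field, reordering one side silently scrambles every value after the change.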
diff --git a/server/src/main/java/org/opensearch/transport/TcpHeader.java b/server/src/main/java/org/opensearch/transport/TcpHeader.java
index 78353a9a80403..7e991bfddd53e 100644
--- a/server/src/main/java/org/opensearch/transport/TcpHeader.java
+++ b/server/src/main/java/org/opensearch/transport/TcpHeader.java
@@ -32,6 +32,7 @@

 package org.opensearch.transport;

+import com.google.protobuf.CodedOutputStream;
 import org.opensearch.Version;
 import org.opensearch.core.common.io.stream.StreamOutput;

@@ -91,4 +92,22 @@ public static void writeHeader(
         assert variableHeaderSize != -1 : "Variable header size not set";
         output.writeInt(variableHeaderSize);
     }
+
+    public static void writeHeaderProtobuf(
+        CodedOutputStream output,
+        long requestId,
+        byte status,
+        Version version,
+        int contentSize,
+        int variableHeaderSize
+    ) throws IOException {
+        output.writeByteArrayNoTag(PREFIX);
+        // write the size, the size indicates the remaining message size, not including the size int
+        output.writeInt32NoTag(contentSize + REQUEST_ID_SIZE + STATUS_SIZE + VERSION_ID_SIZE + VARIABLE_HEADER_SIZE);
+        output.writeInt64NoTag(requestId);
+        output.writeRawByte(status);
+        output.writeInt32NoTag(version.id);
+        assert variableHeaderSize != -1 : "Variable header size not set";
+        output.writeInt32NoTag(variableHeaderSize);
+    }
 }
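writeHeaderProtobuf mirrors the fixed native header (prefix, remaining size, request id, status byte, version id, variable header size) but emits it with protobuf primitives, so the integer fields become varints rather than fixed-width ints, and writeByteArrayNoTag additionally length-prefixes the marker bytes. A hedged usage sketch, with the numeric arguments standing in for values that the transport layer would normally supply:

    import com.google.protobuf.CodedOutputStream;
    import java.io.ByteArrayOutputStream;
    import org.opensearch.Version;

    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    CodedOutputStream out = CodedOutputStream.newInstance(buffer);
    // placeholder values; in practice these come from the outbound message being framed
    TcpHeader.writeHeaderProtobuf(out, /*requestId*/ 42L, /*status*/ (byte) 0, Version.CURRENT, /*contentSize*/ 0, /*variableHeaderSize*/ 0);
    out.flush();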
From 8e570343d2a443e312f3a20ca66c232a12dd99e5 Mon Sep 17 00:00:00 2001
From: Vacha Shah
Date: Wed, 21 Jun 2023 21:53:41 +0000
Subject: [PATCH 09/37] Fixing node crashes

Signed-off-by: Vacha Shah
---
 ...nsionRequestProto$ExtensionRequest$1.class | Bin 0 -> 2790 bytes
 ...equestProto$ExtensionRequest$Builder.class | Bin 0 -> 11386 bytes
 ...tensionRequestProto$ExtensionRequest.class | Bin 0 -> 12446 bytes
 ...questProto$ExtensionRequestOrBuilder.class | Bin 0 -> 697 bytes
 .../ExtensionRequestProto$RequestType$1.class | Bin 0 -> 1234 bytes
 .../ExtensionRequestProto$RequestType.class   | Bin 0 -> 5611 bytes
 .../proto/ExtensionRequestProto.class         | Bin 0 -> 3318 bytes
 .../tasks/proto/TaskIdProto$TaskId$1.class    | Bin 0 -> 2505 bytes
 .../proto/TaskIdProto$TaskId$Builder.class    | Bin 0 -> 9996 bytes
 .../tasks/proto/TaskIdProto$TaskId.class      | Bin 0 -> 11288 bytes
 .../proto/TaskIdProto$TaskIdOrBuilder.class   | Bin 0 -> 429 bytes
 .../opensearch/tasks/proto/TaskIdProto.class  | Bin 0 -> 2532 bytes
 ...sourceStatsProto$TaskResourceStats$1.class | Bin 0 -> 2758 bytes
 ...StatsProto$TaskResourceStats$Builder.class | Bin 0 -> 14366 bytes
 ...tats$ResourceUsageDefaultEntryHolder.class | Bin 0 -> 1971 bytes
 ...askResourceStats$TaskResourceUsage$1.class | Bin 0 -> 3039 bytes
 ...ourceStats$TaskResourceUsage$Builder.class | Bin 0 -> 9805 bytes
 ...$TaskResourceStats$TaskResourceUsage.class | Bin 0 -> 11572 bytes
 ...urceStats$TaskResourceUsageOrBuilder.class | Bin 0 -> 537 bytes
 ...ResourceStatsProto$TaskResourceStats.class | Bin 0 -> 15186 bytes
 ...tatsProto$TaskResourceStatsOrBuilder.class | Bin 0 -> 1364 bytes
 .../tasks/proto/TaskResourceStatsProto.class  | Bin 0 -> 4154 bytes
 .../org/opensearch/action/ActionModule.java   | 186 +++
 ...ProtobufActionListenerResponseHandler.java |   4 +-
 .../ProtobufTransportNodesInfoAction.java     | 128 ++
 .../ProtobufTransportNodesStatsAction.java    | 132 ++
 .../ProtobufTransportClusterStateAction.java  | 212 +++
 .../action/main/ProtobufMainAction.java       |  26 +
 .../action/main/ProtobufMainRequest.java      |  35 +
 .../action/main/ProtobufMainResponse.java     | 185 +++
 .../main/ProtobufTransportMainAction.java     |  56 +
 .../ProtobufHandledTransportAction.java       |  83 +
 .../support/ProtobufTransportAction.java      |  32 +-
 ...obufTransportClusterManagerNodeAction.java | 352 +++++
 ...TransportClusterManagerNodeReadAction.java |  67 +
 .../nodes/ProtobufTransportNodesAction.java   | 309 ++++
 .../client/ProtobufFilterClient.java          |   4 +-
 .../client/ProtobufOpenSearchClient.java      |   4 +-
 .../client/node/ProtobufNodeClient.java       |  20 +-
 .../support/ProtobufAbstractClient.java       |  10 +-
 .../ClusterManagerNodeChangePredicate.java    |  23 +
 .../org/opensearch/cluster/ClusterModule.java |   8 +
 .../cluster/ProtobufClusterStateApplier.java  |   2 +-
 .../cluster/ProtobufClusterStateListener.java |  22 +
 .../cluster/ProtobufClusterStateObserver.java | 379 +++++
 .../ProtobufTimeoutClusterStateListener.java  |  49 +
 .../ProtobufIndexNameExpressionResolver.java  | 1342 +++++++++++++++++
 .../service/ClusterApplierService.java        |  14 +
 .../cluster/service/ClusterService.java       |   9 +
 .../common/network/NetworkModule.java         |  48 +
 .../common/settings/SettingsModule.java       |   1 +
 .../common/unit/ProtobufSizeValue.java        |   8 +
 .../org/opensearch/common/unit/SizeValue.java |   8 +
 .../http/AbstractHttpServerTransport.java     |   2 +
 .../main/java/org/opensearch/node/Node.java   | 180 ++-
 .../opensearch/node/ProtobufNodeService.java  | 238 +++
 .../node/ProtobufReportingService.java        |   2 +-
 .../plugins/ProtobufActionPlugin.java         | 205 +++
 .../org/opensearch/rest/MethodHandlers.java   |  33 +
 .../opensearch/rest/ProtobufRestHandler.java  |   2 +-
 .../org/opensearch/rest/RestBaseHandler.java  | 189 +++
 .../org/opensearch/rest/RestController.java   | 199 ++-
 .../java/org/opensearch/rest/RestHandler.java |   2 +-
 .../action/cat/ProtobufRestCatAction.java     |  58 +
 .../action/cat/ProtobufRestNodesAction.java   |   6 +-
 .../ProtobufTaskCancellationService.java      |  22 +-
 .../opensearch/tasks/ProtobufTaskManager.java |  10 +-
 .../ProtobufTaskResourceTrackingService.java  |   6 +-
 .../org/opensearch/tasks/TaskManager.java     | 842 ++++++++++-
 .../threadpool/ProtobufExecutorBuilder.java   |  91 --
 .../ProtobufFixedExecutorBuilder.java         | 182 ---
 .../ProtobufResizableExecutorBuilder.java     | 134 --
 .../ProtobufScalingExecutorBuilder.java       | 141 --
 .../threadpool/ProtobufThreadPool.java        | 860 -----------
 .../threadpool/ProtobufThreadPoolInfo.java    |  10 +-
 .../org/opensearch/threadpool/ThreadPool.java |  84 +-
 .../ProtobufClusterConnectionManager.java     |  26 +-
 .../transport/ProtobufConnectionManager.java  |  14 +-
 .../transport/ProtobufOutboundHandler.java    |   6 +-
 .../ProtobufRemoteClusterAwareClient.java     |  14 +-
 .../ProtobufRemoteClusterConnection.java      |  12 +-
 .../ProtobufRemoteClusterService.java         |  10 +-
 .../ProtobufRemoteConnectionManager.java      |  16 +-
 .../ProtobufRemoteConnectionStrategy.java     |   2 +-
 .../ProtobufRequestHandlerRegistry.java       |  14 +-
 .../ProtobufSniffConnectionStrategy.java      |  18 +-
 .../transport/ProtobufTransport.java          |   4 +-
 .../ProtobufTransportActionProxy.java         |   8 +-
 .../ProtobufTransportConnectionListener.java  |   8 +-
 .../ProtobufTransportInterceptor.java         |   4 +-
 .../ProtobufTransportMessageListener.java     |   2 +-
 .../transport/ProtobufTransportService.java   | 137 +-
 .../transport/RemoteClusterService.java       |  67 +-
 .../opensearch/transport/TcpTransport.java    |  52 +
 .../org/opensearch/transport/Transport.java   | 239 +++
 .../org/opensearch/usage/UsageService.java    |  31 +
 .../opensearch/rest/RestControllerTests.java  |   4 +-
 .../test/transport/FakeTransport.java         |  56 +
 .../test/transport/StubbableTransport.java    |  55 +
 99 files changed, 6388 insertions(+), 1667 deletions(-)
 create mode 100644 server/bin/main/org/opensearch/extensions/proto/ExtensionRequestProto$ExtensionRequest$1.class
 create mode 100644 server/bin/main/org/opensearch/extensions/proto/ExtensionRequestProto$ExtensionRequest$Builder.class
 create mode 100644 server/bin/main/org/opensearch/extensions/proto/ExtensionRequestProto$ExtensionRequest.class
 create mode 100644 server/bin/main/org/opensearch/extensions/proto/ExtensionRequestProto$ExtensionRequestOrBuilder.class
 create mode 100644 server/bin/main/org/opensearch/extensions/proto/ExtensionRequestProto$RequestType$1.class
 create mode 100644 server/bin/main/org/opensearch/extensions/proto/ExtensionRequestProto$RequestType.class
 create mode 100644 server/bin/main/org/opensearch/extensions/proto/ExtensionRequestProto.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskIdProto$TaskId$1.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskIdProto$TaskId$Builder.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskIdProto$TaskId.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskIdProto$TaskIdOrBuilder.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskIdProto.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$1.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$Builder.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$ResourceUsageDefaultEntryHolder.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsage$1.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsage$Builder.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsage.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsageOrBuilder.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStatsOrBuilder.class
 create mode 100644 server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto.class
 create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java
 create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java
 create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java
 create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufClusterStateListener.java
 create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufClusterStateObserver.java
 create mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufTimeoutClusterStateListener.java
 create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java
 create mode 100644 server/src/main/java/org/opensearch/node/ProtobufNodeService.java
 create mode 100644 server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java
 create mode 100644 server/src/main/java/org/opensearch/rest/RestBaseHandler.java
 create mode 100644 server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java
 delete mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java
 delete mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java
 delete mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java
 delete mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java
 delete mode 100644 server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java

[GIT binary patch hunks for the 22 generated protobuf .class files under server/bin/main omitted; the files and their sizes are listed in the diffstat above]
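The elided hunks are compiled protobuf classes checked in under server/bin/main; their .proto sources are not part of this patch, so the field names below are assumptions inferred from the generated class names. If TaskIdProto.TaskId carries the usual node id / task id pair, the generated builder API would be exercised like this (only newBuilder/build/toByteArray/parseFrom are guaranteed by protobuf-java codegen):

    // Hypothetical field accessors (setNodeId/setId), for illustration only.
    TaskIdProto.TaskId taskId = TaskIdProto.TaskId.newBuilder()
        .setNodeId("node-1")
        .setId(42L)
        .build();
    byte[] wire = taskId.toByteArray();
    TaskIdProto.TaskId parsed = TaskIdProto.TaskId.parseFrom(wire);   // throws InvalidProtocolBufferException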
new file mode 100644 index 0000000000000000000000000000000000000000..d24815257884bb9879fbd4e7482064beac268f54 GIT binary patch literal 3039 zcmdT`|5F=96#wjT>9y$<2&GymT8gnvNa0G06&qR&7;H)sC;>YC!ErX1C0u*C%iL{h zb;iHP8Gow&KpmaY-~FSkZ;uddxWQq@{9tBs@9o>&x9@%4m)$@9{N*eoL|w@}@{H7|i!aGT^q{PLz(O@gTCrFL=r(*|)j7%L7M^yFy*=J0Zi<@_-!5GmN&C$z->5xVskkEruU5%7 z?#P-91aIYIR~27M!rk$`EzxuclyAz86#496JRtj%#h12dOFA$#Fvf5@&h(P^g1b&5 z3bpOKE1kVP5%e}DF^+c(Tw>T62+k(n#bvV86JM`%9Jiqogl7f>NxFMPK8Z<88Mwmm z^L(c%R{1O9V}Em_R@MZxZOpi3qJ}6W1`qVAfTXx+Ou?H>gn2p(&(Ie2D7| z*)s}~bF z++>*M;mLKyaF?KqQ-AWl2$y-)DJ_P}nesFKf?JMnl{R`Nd578owZ%rPV!)ukA& z#~yyHk1?b~f7fEj#cqyoZwyA1Gpe$`8ceI}Whw)syjYC2?q6_axD&hW#As8Gy%ZY) zh6{9o+RrPz9bH!j=)OzGg}Wbd1D`Qu<5tGP%OK*bpkMFlZlox6T<$fkjrucTOG-P2 zRTIM)p=Mo+UI=TBx|^>YdmThK$-L4)ov-FJJuW)|t^6()-?v`}%Y#Xf`~!;ANYS%h zKXkliMaX@>LCvh>c_LVKc^C@1A=k;p|KK_>P@&FoYDH9O$Dr(!MNciNr`}C@wkksT z&<`c1DLzB=%tQ30cNC2c8d*dc(ZuX)oc)m)4N3Z{X=p{9C*C|5-IwM z6zO#AFez4al6tM8S=^!-W!k(lN?V7nRd8ObVpQ9}q_&AE?J=_27IIn*i`q8wM3=Q) zwHgm2UkhCq$V}YEoye^M7V#0eqvJmA(j$0?97|#epO9pV9A3gb;&Noo67s|?leTNn z-;ja20cPO7flm!Gli1X{flSrFt+CMp%@_Jn!VOZdf?&)lgjQY=`wzIu9%ZXW{{%jd sxFXgf`UQEQ;2x)MiT*RhKSCK*OpxpmHi)Vys^26@Hfj8rcn#&h0QeZG#sB~S literal 0 HcmV?d00001 diff --git a/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsage$Builder.class b/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsage$Builder.class new file mode 100644 index 0000000000000000000000000000000000000000..ea84d3a032ee757f74e921aaca52911c59ab86a7 GIT binary patch literal 9805 zcmcgxd3+Sr9sj<~ZiY=p$Uy=K5vqV;0|Z4;NJtwlT^f)QAcYp=WHU)tHZ$q&EQeaf z)~m&%wV>irt!<@V6;T4!D%w`9Xtijow%W_H_WZPewy6ES+1btPX0j&LHTmqm**EWZ z|9;>1n>UBwf9O#Fr{SLpd;%NIcz3|q9gQl}))+V%Wed?lQG}fud75D{aN6g;(Zqw|J=_$q?$*%e{bVHA8mfpES zPe_V&a|dXO1A_Ga+Y}3ykj7 zd(HT^u(4>HrOU}P*DsD~iA2Mo3Geb4mL9TN zte$9sf?5jVtTg0MP;q0FmNf|!)y`N~f+B=eoQPVwv39tDcCclY7S|1{A;o2uFK?qns@ zf3b=sa+zw+Fv_wHB>gfKXW&e34bklCU?y#lgv(WY7A*o(41LR5L#CNiK+e{;bZbQK zv!bRUu-ZG>oP`bEwUn2qOzpUypjQRj3X?b#aSYKYvT23ZXG6Vo%I9R_uNIi=ZIMU) zr}IY^j=&lfYq5@;-QJ!fu%Wg;#$OV zyq;mqIyS8yo$y)XiNEP6$;|Ch5yiOz<%EO9Sv{e>IV_ji8B@^<)@ylU@S2X=<%LJ+ z8uY37l2o&dYOZiMsZzg$3QOu&M*VVb)?l-WE!di(Tx)DH%q@n!$8(=OaR|6~FKg8; z!luA^D$d6RgiX5$+$+fFGVkcQS4lpFcN7a##YM8DgQl^#hmDGCT=sdZCtLH*u+f*a zTCKRQ^?LVsOSf9=jEL8(_3kM1ZCgv!(hF`1A6YURMMA+>1uE^{D{9t<&&u3UaH+s? 
z15Me=WGuGIwA)5h**%LN4vuW+LA7CS9 zY*DhSOOIzfqT&YZrnX6AOI+)7xl>>qE5gIi-17oanJ5C&JrO0>fg~u`a}!6P&Xd?X z!`KaXng&}Xv4Q@#WE2E{o#M$NzrhU(z9%rl%Tu1R72J`|H@nwg(-Swhq#pGGbZnAp zRNDP%E?`#Jvsn<7LM3-jBxK6mqv9^y%O=G;J>2V4$l;}Yp0Do)P7o;fkZE|;r>cjGsBm)q{`yg|w6X}M?Qhv+$n z+GwTqKaxB6c7Y2%DP>&lckTFFKVk7M(U*KuqI1F^6ELHO48@}|6lV)8@m46^_6#q* zB*Vw$bKyzEt^AptrO422MQ#F12~VhK#zJ}ZltctJ7$vRt3FNYO~Q#9Hh8RRpTy{F5}sqAykIgXZPF>D z;FrX>VGytpaGur_yeLpV3=(%nDEKvVVwgkEgm>lbsw=R_zi*gL)J5?2B;Oh z#$DWtVsNr3_&s;r;jK-(HFY4w{m1cH`gcXv$(sG-tl;$_B9K!m3f^Qbct<)3NnP$SmNd$`` zeggdV$%7}p-|)#}Cwt_f6UP~t02Rk@)oA{j%2EFL_hHOljy_cJyI9VOb5L#1@Giz- zyhG7!O8`puVM0Ysh3LoR0m2F{h3G<&=z$`-dCJvgGf{0ZrI?24HcK8TI_&0hlqv*5 z2QcG)`=->OgqlFa_}0X5F$lOA)Hw{AxRPs89I88jnYkR|2nfsOP@P)kaF906M!hYZ zIPUOT<=9qDWlnAj7465Vq2PX;Ccox~2F$Fchat9*_g0#?4P(T4d=qiLOVesc4v)t( zzMgGM^Y-<{E?R$vmc5gsS%&r?ng;Yp#pyCnTJ9YVt;SJ$s4~=#MJ;vxSb8Hy)g46m zP@QB|WQRNzxg9j=N~q#0zLB}c<+e&kr2vy=1y+*bS)?2VaTMhWj|#Z4X2eD9s$)yeXYNzV30VCb#GBV7=@?nYa_x;x3m9#%F|&hmJ0Cm!4;lm)YWEY-Jr^&%%@98X4E7J6KJ7 z>Fn;{PFpgs`mw2vGiGMS>_@y!MkLvfZEcLomt86yFOfss$EfT@g}5IR#RGf~@*rlh z+UJP_JOe!FQn4|kVk1>dCkHVoSYD2Dc_i#8Ig_KTb)Wwp`R#(5M{IT}l<+z1q&K&7 z)x})vC*4=1p<)NG88qyi`4mbY#wF_y;IczSO?z-IZMl4&zsi3%?*gm&k?A1+PT)sE zVJ$yp{4Dfy{Q_zHRkFC(GhzJOS?hZ+&6%8UPX}#7(x(cj9K;LT`?Sfp6nh`eqz7+(y6n>E!^vgWEYLb87}l z)T7k&C_=(NcIyqjO3ZzaMfbid534d}S78r#5SeU0$Y6U5 z1xWp>?a-FJe_;mP)4A`G;R~SDSBx^Bg0a3)F69Ssoo9}Y@@~uL=UDEma1jq!&NE4e zILmTZ+Id;-n}~pK62|yyQ0Y7FLoB~@D9hhVTRz`Y#xzvs_9BGvNv1hq#X{%H_GK7puJyi8h9~vaXpO`uL$;e55I#QBm&s6?dKE;~h zmCAWZc*yAGvM2M2xyw71^U^Hwv}RF0F-ttH8JdK9*w1a`c^Zf9@fkeJv4gwtFYr8$ z_=9)>zp}@d@Cts*N1!6SYXANn$3GOSn8NXooc|Nvz>(r0-oiWf_%1t!zjD3?@8WM9 U`Qi*mI0pFp?_Bo}TVC1!01HJU@&Et; literal 0 HcmV?d00001 diff --git a/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsage.class b/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats$TaskResourceUsage.class new file mode 100644 index 0000000000000000000000000000000000000000..db9b3548bd578c517a325f6acb3aaff04006ce3a GIT binary patch literal 11572 zcmd5?33yc175;BB$;fDsR_oS9+qzq8ZU1}cW#-MBn?U>=zvla9-kW>x zIrl&RIrrTA&ddED-f}yDL&P)#SpuDrXm3qq&Fu}T3fB#-uUTq^t!U7;dRncR z9Jy+tfn0&(GXi6Iw0XwXpUTt@Oh-@tp$gZ5TO2F3;>9GnRu&;^f*qheWs{VX7?QfzCQw+=` zdl?~b=79y6fqms%vrX)UPcbxFLSd_IXkeWc?Fg>xCsm~_k#2@ykb%hGPUFH#ErIFl zWTq3#i>0v#k5{U;Y6%4kkc|T*JyjBEjfsiKlXnN2D2?AOG%*R2$)?>GiU}C&Mi$Wg z@xm0Cl4|ef!Gz7@)Fr`7==%y3hOJEjwPd@=X!{=1&>0?zSv@PFp_7<=7|6i^%VsRk zY1t4wDOl4V4ENS7U$?>Pw&gMyQiBDK*i)RNXlH)uwd_VK)*THE+7YIqd6jdtI+J5! zGn4m=^HGNe1Iz;ljyr5kG=kY;O(;x5!@>S8<~cjm-9<%R$%Nmf5`LFT_+4q4J*7ou z_O2e6HFL*fi@>xIC`&E7Ib`+s)OB}r7l@3kkHbeHN3#~v#T8cp6U*=!f#AqYACuAR zHE0)Wjo&k@23iG<8@EeSHrjT%Bd4?|2aTkO0xZX84XhA2Oqa{|2&ajaXlFv@>43RD zh4a*%s;knqjvD%dvDo6Y27Tb~5e}}&5-y~Ml)YBveQx2!>}hrHs9bfSRCwGVj$tbq z>)zNJ9CR|Tu_+p|t&WI5l|GHg4W(!KkUcnLx7$%GIG|E*LDR|I)}YN(M9jeP0+Un* z2}NoG%M(2XI05Sn1O;a4$WK#jq8r@s3lyS2*gp6mft<>~GMTXj)|==>U)<8p@Wyat zQ&{;!O(tq0h6w`Z$ZyWDW;U>S|<(# z`^i9=JM^3sKIa&@TbyR%RD4bnc!r4%bjrIkO{|i48*mzdMb>SQyRv2r$cfl}^4OTy z7mW2uL!|-dn)tFbpm@Z9qnu8u`KuStlGnV4B#9%LV3)W|Eg!hMlh` zXmhcZ#a-zkL13$1l{TuIPPUouxVl$by`h*L-Q2pyOjeso|!? 
z@>(QNJr2_aHtW-)j5FrNP`Edv^lX##Tq|(U9-z>eKV5683%kG-yVb1K@ySQZ4U&rO z0^DwO%g1GuwtqCh8rIfUuC32x%>TCN(CR|asp>E# z=Y*tbiJa__PUd7!$Nz!5C11akeEmw`)bSHDE(^xyuvB@g(drKka1-j0TfGE#pNadW zGpD6?d%P+~J}5`hjMMdIs1w{L6@m{-N+02~nI6kXtmzoh;M5IE2498N32ABsuF-=@ z8>!VdT0YH87LxvaTyppX51V?}i7za5g!#Ji=Mg6cf`x;r#5zXYbOs*nF)b} zrF@!H1;6#dp?*7Ard*S$wZMH=lESLIz&@jt0-U5EuxZQznXJ{z5=|UNB0ZF+^OWeD zF$z#8zpDo_xys)%@j6~*eQNCSBJMM9+&GjNSug!|tM@3>b4&<`Q<|hYvmrmT7D`oO1H7>{f_1JF(A= z*zZ=%;N7enQE>x1vQWwIBEmz~;V8&z!sM)@sYLe34_~11JFdK1soco1vahVFdMD;q zS&s&#sj|XIMS=-yacg6G-IhFQRmoIxE)88 ziqe*0v?T>gX$Wuuhg`(KxsX&|;4-Dqfx?m+IvsU21THDesk#aCuJzb)xl1@FA#b#IDFfP#vL8o867v$<2Nz3 zhxsmfr@34`7NM{$$~v^Bv1ZAAcREV)Jy~OE(@MZO^}8 zE6i%2yd6dtFZ1Iql9>6jWf)fVFha?gor+uv-^RGPorvz>I=T}xaTn&|ZWpox68=8` z0|-0FIB{tS?^(L%w_JNW9572c--)&UtV@yO-=R_00$1c$;8dGm z;v2@v?Dg-!iT)&c3rHDoFUh-)cMmYcA0&CdW*T@1`{NN*<55C>j7#cq9Em5;#En<3 z=s2-U6U5fz6qRBmv3bar#FkAWc)m+;On8_k$p7V7T3vOObY}IP5L{Mr<$r!#^&Jqc z1bO;gerS+%^DwsX*OtUDi@#{ZSt#D2Wa z;&nD&qB$?4hS!C>F2XA`=g-QV>2YWYZ!E^jFpC&$T7?hP-;L-n8;qf znsAJJkn%EER+Ig6wjxhnwiVB*EuQ4>+l8+T<7+3>7SHnc`FL66?~|79!Z+e=If=F{ z@wRDd08PC(?yqxP{)!v>8V&tB;rx@Pzs}HlgT8tb6?lso_Dvf2HvRPuQ_{OkChwpD z?_mZ0#fAC4%Xh0CV<+M$d^_PgZY?Hbb}KVpazT7Y#o!@qk;!1fc2rc|jPGSJszoxp zsN}|e86ENDH~~Mv5BW=~yCO|py+c`%R9Ag7uFTfy%XJ|xa`eeP=12H3!AQ;5q-pj5 z{fQQ6p$F(s9iZ2xsq+H8UJJCu161lu1$tweW)IMtv_Q)|KzBMoccrQG0=-!a)b9Z* z^`!#+nM-p#7aqnI>3VJ}6;-=%TZ-cg*a{$*n9Q>>;oE&3ki0?ZoqD3UQI44Ewc}4 zR3ApLhkC$1>M;9QnnEwLztLzsl3a<`yh`W$cHwv4<=*JR<#Q5B zzAP>|va^*W_uB2)Zx~N`#haD5Oq^2aSx0CQjdG$*3Ncwe=M@aN<}JjZn4v)tR<9ft z$2uyeRdM30FyU6T@Sys_)hKfAMwG^n5bDe-T49Jb{tsNMOZha%DCfXXvdm!B$S0p# zotau0$#PpOl#02Rz0^842{yhoI*>0qmCpD!Gjh_R5VxPdP*FbNF2Y~=osYlaRY$e! z__tqDr^>TgbuQ*!<6bawuKtHg)h@{jujJZFlu}gOLO!#URegHp|aSj^^ zm5Ie>Chuit-~RUV>-_`36^iLsBeeHI#$>@JHM5+;tXiv{Hkwzt3YDZ#_O=xvy2o@w}P(oUleH+>V0d; z$C+U!j&Pt5{{xB$ErrWJFOp|&g;hBDt!;8us0@k2jpHCFg@~Lz`PP!)j!;2?y;@Xp bYK4w{)TMT$YoLiXj%A{OPPvN1wcq>(S}3Dy literal 0 HcmV?d00001 diff --git a/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats.class b/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStats.class new file mode 100644 index 0000000000000000000000000000000000000000..40849a2553a5088388e941e2c482474c29f92921 GIT binary patch literal 15186 zcmcgz3wTuJnSQ@B6HYEC+%JZJqJoA5h=`X$A{t155eS$N6j9@l9Fl>_Oq`jhcq!JF zT5r9mtrc6_?L}P6b`>>1X}xs2YinC~w{GqBy0v?0x7}_pdug||y6<<+nVB<_lSq69 zd>%6Aod5s6|NSoi`+xs`jxW9Og=Ya=D36#h1gn#&zQ*LBok-i!R8N0nCYs)zZX8S{ zGs(s+yza2m$)Qw_-Is0r|ncM8sBQC(y?TsE7B^EO9j=dlZkXDn#gR8 z#)s@O8twcinBCFVxv8sTbz4_w%i6Z?HIcRrt=;Wi8&|b;P%;u?HxW!sd8T(M@@~PY z8-h&S7#&;_v*W!@f{M;qUm}_rO4))t$L!p^VOR9}Xk$E@=xglEq+*G_rc(H%AT$o_ zrj^7sW56Cr#%{8EBk4#YmZ7WkhNiBf?^@f|v~+FQ(j95<+|ts%x=qkl3Md?Q!seEa zPIc*=KwoT*rWiN&>lvfDxg=QB3FXZ_@ffXNDG1dq+$spKPWIZBU|cPnhFaoncG@fW z=yVG+Fw?}D^hODCb^g*yoPkqS^H~;V;S|B74Y7pXJ~XhyPHlrNzH#bOpbGO;K~=)OA>vH%9(u3yxJ~X#45jVf&8gV+Bqk{iKparcK+OS4&icd#&O>!7cFmDu3=Jc3YN7BzTZpTm}6C1GGZtSrK zGh8uDaJ86~Z+98ln^5K0K;cN@da`Ekyn*Q8yq!w-^AgV7T`{i`5wz>1*+inP^CgkT z>pHC>xXeNaI+;+>L9Vsr-Z^UhC_~cl2)Zn6#pUF!Ow!dPm`+2BMc8*O+Ns#Z!#v|k z-Lspvdkiwy=QwAfYU%0WVw}v<&PAhmFMwi3iaTj}jfHMpE7+7B>22kgB>wx%dC~?#ZD9Cn&m-tz;U6n(1(7(X)L!`B^3CxaB1%LXVU6; zG@Wkph2DJ-JbR7{2dVAuQR1GRH7Bz~(dA~hg-MuPfdM2lAM1z# zQs{1bPbK!DU6I^v;U3(}^*lS?dYd2$sMK4J7hXv-DXO(nek6})S;`iBMo`&m52oxM z-GK;hDR#ukOmRVtx9n_v|#Un#i!1Q}10IahS6P%ul$0XG#sIk|Z}Yh=o8=}#reb-Vqs$(BDS$T=n+ z6P%HsHIaDS?u*7-Qhh@Mb|RDGoJt(f%|dmoaDC+#kYr-skcwsOElELrFfHpAv3Ju@ zW^gD&d+g|dv%RX!X-}JYf>YCh5lc2kHsxA$lXA$yQ+S$nn|nS%R&|}inX6;=9q*Wd 
zRS=ap^+Rxt#8!7biD>;P7`AX2M_i8VO6*P~Z%8=OFSrUN9p`bBKqE%_m+KYttJ68q(Nl^}{?YQ_1q)wBHP`CwS%x+!J$%K& zU*m6BCo}4)Jp@|3;wknjd=)R5_*%h=#|8^u$2Ta;EZ{MfZp&sCED{m#ITgH&zccZ- zTzc}8G23e4@9{0Jn+f|yj$T-$=lJL!vU!qSZRa}ys>G}KM-$%}y_#^kS@`Vr*y+cgRJZh+m1g5mtX#-QLX<|fl?bZpE+!xR*7s7<94la66RTv8%9H^wN=zPiFe{?6{mk>DR!MWTeuP zDkYi8S&~`jHIhQ5+QM?IsFX=E#gxef)5g_mNjdhAl4P1C)183F(rsG(E2Jq3X$na~ z$Kz4)pq_rNNhJpaS9$X#!4+iHo{YU}Xy;DnK&k}gOOq+Jf^&ni-8<57&h&edad*_* z`c)-!o>+2z5BEkh(OkY4nJl;?SXE%cwcVKEKAHt| zhus%TllA*w77&_4E|K$4vlKa`&S+3) zEE1d-T(lI78DQ+ZK*M}eg8SZ5bD)kBodxHp;Y$TPkqMdzJ`)1lM}rsOqe$KMRiz8q zd5ZgdEofA0z;MQPzW&LubzHbhl~2nR-U>m>cmV`hk|j{Bz!Sbwy(G!6E6YWetdxsc z+BvL!c`VcKtMmn%gHyxhNblTk1h$}L@@faJ`(-gTB*&9Q(s`aNYB=ekRPVH^ciIFu zkKYaB!aKIdraG}{wd1h?mR`NOv?{oDmP9mka|@e`VyHdWYfqXn5Nw819G;l;YF7tt z5=4VX&gM~&{Q;VKYV36bT*tiTB z+qs*#6)S;>xgDF9otIm3g>>;KGr#rI$5&diT_0CulHP_;@IYNDMlVg#;Iknr*I3dm zR}t)ho$B+d-U{bMV}@JUN<_grb6;>Q1KwwN1~basJ)Ho!y?mV~+~O^NyNLJ-eggT1 z==d%^Fu&f?D#6B51Tz-~ZiBd=_Rfa$ug%w%CS$o`slhzw;!A*0&Lg}@DX_=vP`WQZ z^nHLkXzr{;kIRdEkZU`06j=TZz0SOpB}w77^0cx0(@nKeU-4L^pHAm9ur)FLRHfE`DDJ!@b#{?Uv^L`rT_nM;zMOZI zcG{G^JY5Z<&HTp={tq2Yc^6X6(UJK0YuOnDE#rU;T4eUkAo#v>6^?%<|oe9}xR zXKGg$5D{ls!3$mI{kti53obfN^}*kcu!Ik8Fy;NDoW&M}&y)``WhnV3Y`X+O#t(*fu?n<&ouxT%mNqD@AWYI{n@EvPDwPYZp`Y3$7`}>gosNsP=tI zIQzI8owWB!mbcsK%(`ScgQYyK4)I?ZCSW#9l=HfR-xM0CL=7xd=k6!+8DDzyeu~pR zmG}JSir2IC`!na4UyJ41YtTq-vr)?@bJ%(j`|E!*jzP8^#@wec?-Th;7K+Z zIGcYb5)Q)mqcZ$3riVX|I;TZj`KcHGx`Wp{D!1`ol~&a^97aRE@eCHLqEKEDT2N*j zr7f!*MKj&*M5cHth;tkdoXcyAINc}9c%{Q-kvH>B+bZi1P6E8a>nVG|hMV(^H4gRIFuF z&S_!Vzsi%?%NXm}OKTtJUocYqa@6}bt$rA*pE!oK+m0c!ZR+~N*jPJ^&BNGo7*{;U zPOeL5QU&;>&vaZtFjrzOwqr4WHAb#$m*zk(#Wp8-u4G#n#`G}%ZpT#~m=-7L=#PuZn2|dey{ONxRo)PWerrXG+=PfZ3DH6IYSXm(r?KlrR31Y7 z2vSezovL@OaMT+&!!&M1rEv=;8hd@}r+MlXHz&qK>bntpJlGd{`m5Dn!?@`v-YI-{ zR1ietcAvItPn+P~c#o&#YU8veIEI2 zMgztZM1F|KpCoefq46~8jA6%udF~K73zm5n%;cm~+=dUgKKTrvDDDav#9c$&4E^Ky zoQHWAn>FWZI29H**M>idvhcy${U{3^tPhhm!#(GZw|tw*(LT>EVcz|v6YD6i6~T-KO%_hf@DV)8%V!#f zaa38`<-t=q<6(RaV!T8IUq`v|4OAFkC6aGqy796P#f+S9X5dSBmIH~trs7|wmNd{B zC95gg7yPgzW6Ay00vo%frKAL26ON7!b(iXFy}5jS4Lb;eJ8shrNa zJi+gtbWvW6Mff9Lr`t9&=zqd>98*(SL*%x-k!yRCZJHc25z-L7$&0Ybik!ZF8C;%t zg?nAc;Si-XyPF#7AJrsjcmc8>6U1_!;T;=XAg?7{y!^QPDjq_){v4dVuzB*d+WsSA4a-Ev$zyMs$^c^4 z_zux#0 zmKtwhB^|gv#Ix3r;Y-*K&!kCMEwgfKBc~1R;F)~|&zNfn-J3Qt+nF{O@`>G`^%Vkb%4_x*b2*=A=vgBzs6Qgj1%Y?Uf>0eQLudP-R>>vOLJ**RJRc9B}HI2d!D^)gu-Iab_8f#N3d&)z^cZ)XGPCBJQ`cVUk!fvh-@vM z?i+l#!XBwq=)tQed$uD9n1?WLShf|5w>#p>0xFcP$j{Gbn+nwr%hkn#m-)u6l55Gu zAocQO*BkdLPsQAN4*aOrw$W{eC3*l8y{oKJmqneOUZ@LZXe;LMS9cLEht2hjQlck6 zPPx?N4j`t(J2bu62hGtSD_+wwWHH5rLx-w3dv6Cqav0{ zI7j;Vw?bmF%Tw*!Sj{VPK=?tQ=XOh+lFsHs1{|vPNiHasd@?5~k{BVG3pOWav7}oL zx_-$9Tl7%R!|RhBf%lltr=INO7RnBKvJ3dDNybfpTvs$O8@#QzBa$AUNGpSt@kGv| zpETPh)He*v&=|u|=7xY_ZX8?Wrg4feVS`2~o+i|qw|~0?jWW6j-}>B4iw(I&ZsqUo zm@My<+vIlC$h+CLFI*w-zpF<-`0HG|NY{7@}VNFUl#nkM{%oeUQg3+^fh(sgHXsd6@mn*#BeH^Ktey(ER^+ Ci&Dt| literal 0 HcmV?d00001 diff --git a/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStatsOrBuilder.class b/server/bin/main/org/opensearch/tasks/proto/TaskResourceStatsProto$TaskResourceStatsOrBuilder.class new file mode 100644 index 0000000000000000000000000000000000000000..a980cc969a9ac79160a6ddc09a2d79fe434c1d05 GIT binary patch literal 1364 zcmcIkOH&g;5dH=kL4@pbp^6-)dZzp9=XCe)Kc{B^-l7pbFR+-BzjV1blyyw5y})F%waL=NXfKs^BM{~{x2~M_yj41Fx4v^IMQZ}H z&DI4r_bQ9G<)AH4+n|A^M0(m6sPHF+ zQ3^J~3xT~`n;Plnus23Hp}!06{_*Oi5U#C;sW0UGXX89?6J7{>?(-h2@WzQSh2iF@ zq88`_OtbtUOMypxCh!JC}ncWIf3tol$p 
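The elided blobs are protobuf-java codegen output (compiled .class files under server/bin, i.e. build output that rode along with the patch rather than source). For orientation, a minimal sketch of the round trip these generated classes support, through the same CodedOutputStream/CodedInputStream pair the rest of this patch builds on. The field accessors (setCpuTimeInNanos, setMemoryInBytes, putResourceUsage) are inferred from the inner-class names above and are assumptions about the checked-in schema, not confirmed by it:

    import com.google.protobuf.CodedInputStream;
    import com.google.protobuf.CodedOutputStream;

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    import org.opensearch.tasks.proto.TaskResourceStatsProto;

    public final class TaskResourceStatsRoundTrip {
        public static void main(String[] args) throws IOException {
            // Build a TaskResourceUsage entry with the generated builder API
            // (setter names assumed, see note above).
            TaskResourceStatsProto.TaskResourceStats.TaskResourceUsage usage =
                TaskResourceStatsProto.TaskResourceStats.TaskResourceUsage.newBuilder()
                    .setCpuTimeInNanos(1_000_000L)
                    .setMemoryInBytes(4_096L)
                    .build();

            // Attach it under the map field that ResourceUsageDefaultEntryHolder backs.
            TaskResourceStatsProto.TaskResourceStats stats =
                TaskResourceStatsProto.TaskResourceStats.newBuilder()
                    .putResourceUsage("total", usage)
                    .build();

            // Serialize through CodedOutputStream, the sink used by writeTo methods here.
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            CodedOutputStream out = CodedOutputStream.newInstance(bytes);
            stats.writeTo(out);
            out.flush();

            // Deserialize through CodedInputStream, the reader-side counterpart.
            CodedInputStream in = CodedInputStream.newInstance(bytes.toByteArray());
            TaskResourceStatsProto.TaskResourceStats parsed =
                TaskResourceStatsProto.TaskResourceStats.parseFrom(in);
            System.out.println(parsed.getResourceUsageMap().containsKey("total"));
        }
    }

Map-typed proto fields generate put*/get*Map accessors plus a nested DefaultEntryHolder class, which is why a ResourceUsageDefaultEntryHolder appears among the compiled blobs above.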
zbws4t5fLepyW8>Mn>FhC(%j323!PRQoqPw%fVQ#ifpA0rp~&5#A{8i+%AB+hmur*Y zp;omixs;hU7R9(Vsav*7>R31sYEc^(#iH$G$F1S4C)~Cg|F}J)hANRm4Qk+$$V$oj z(4<_vv<@Yz^#0Osmo=3k5ObVAah>WbybU;Hr z>Vh~#wDT}O3KXV_51FBJit^rN+oCZc!IpxPLSt@`~y z^R>khX(}V(95&6d%N{*G4BH~KNcG4USWZeYCmS<7G33wXU=GO1GIUo>T6IyXzx&ig zTKrx+tKk$#->&;pLN2Ot`E2rM2E*p+^D5$34udvs(ZgcI)LmD&3g~28wHhzh97Rek z{-Vk$A7u(i<<4plm2E=ZUHOJlaFJnT)rsw>@RFf&r75^X)wu@XQ;r%kxsOs;2qP6s^8S4mVJ-~L7G^5iu_#K311EG!aiLSsMwBAPB4Q#oM ztv9hF@D6s?-~;sBK;%92&R$0(&)pxtg{N*{H=W$X-V(n3Ille1In;ykSspr^V;;#d z^CIY2j(IH4eAH(i&oNKDO}5qGIQ?B9Pr*My9sd;dL{}$@({Epx z{Z~*e5u`rA^LfG9yx<_woYvboQxtBJ< actionPlugins; + private final List protobufActionPlugins; // The unmodifiable map containing OpenSearch and Plugin actions // This is initialized at node bootstrap and contains same-JVM actions // It will be wrapped in the Dynamic Action Registry but otherwise // remains unchanged from its prior purpose, and registered actions // will remain accessible. private final Map> actions; + private final Map> protobufActions; // A dynamic action registry which includes the above immutable actions // and also registers dynamic actions which may be unregistered. Usually // associated with remote action execution on extensions, possibly in // a different JVM and possibly on a different server. private final DynamicActionRegistry dynamicActionRegistry; + private final ProtobufDynamicActionRegistry protobufDynamicActionRegistry; private final ActionFilters actionFilters; + private final ProtobufActionFilters protobufActionFilters; private final AutoCreateIndex autoCreateIndex; private final DestructiveOperations destructiveOperations; private final RestController restController; @@ -534,11 +557,16 @@ public ActionModule( this.clusterSettings = clusterSettings; this.settingsFilter = settingsFilter; this.actionPlugins = actionPlugins; + this.protobufIndexNameExpressionResolver = null; + this.protobufActionPlugins = new ArrayList<>();; + this.protobufActions = new HashMap>(); + this.protobufActionFilters = setupProtobufActionFilters(this.protobufActionPlugins);; this.threadPool = threadPool; this.extensionsManager = extensionsManager; actions = setupActions(actionPlugins); actionFilters = setupActionFilters(actionPlugins); dynamicActionRegistry = new DynamicActionRegistry(); + protobufDynamicActionRegistry = new ProtobufDynamicActionRegistry(); autoCreateIndex = new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, systemIndices); destructiveOperations = new DestructiveOperations(settings, clusterSettings); Set headers = Stream.concat( @@ -566,6 +594,75 @@ public ActionModule( restController = new RestController(headers, restWrapper, nodeClient, circuitBreakerService, usageService, identityService); } + public ActionModule( + Settings settings, + IndexNameExpressionResolver indexNameExpressionResolver, + ProtobufIndexNameExpressionResolver protobufIndexNameExpressionResolver, + IndexScopedSettings indexScopedSettings, + ClusterSettings clusterSettings, + SettingsFilter settingsFilter, + ThreadPool threadPool, + List actionPlugins, + NodeClient nodeClient, + List protobufActionPlugins, + ProtobufNodeClient protobufNodeClient, + CircuitBreakerService circuitBreakerService, + UsageService usageService, + SystemIndices systemIndices + ) { + this.settings = settings; + this.indexNameExpressionResolver = indexNameExpressionResolver; + this.protobufIndexNameExpressionResolver = protobufIndexNameExpressionResolver; + this.indexScopedSettings = indexScopedSettings; + this.clusterSettings = 
+        this.settingsFilter = settingsFilter;
+        this.protobufActionPlugins = protobufActionPlugins;
+        this.threadPool = threadPool;
+        this.actionPlugins = actionPlugins;
+        actions = setupActions(actionPlugins);
+        actionFilters = setupActionFilters(actionPlugins);
+        protobufActions = setupProtobufActions(protobufActionPlugins);
+        protobufActionFilters = setupProtobufActionFilters(protobufActionPlugins);
+        dynamicActionRegistry = new DynamicActionRegistry();
+        protobufDynamicActionRegistry = new ProtobufDynamicActionRegistry();
+        autoCreateIndex = new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, systemIndices);
+        destructiveOperations = new DestructiveOperations(settings, clusterSettings);
+        Set<RestHeaderDefinition> headers = Stream.concat(
+            actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()),
+            Stream.of(new RestHeaderDefinition(Task.X_OPAQUE_ID, false))
+        ).collect(Collectors.toSet());
+        UnaryOperator<RestHandler> restWrapper = null;
+        for (ActionPlugin plugin : actionPlugins) {
+            UnaryOperator<RestHandler> newRestWrapper = plugin.getRestHandlerWrapper(threadPool.getThreadContext());
+            if (newRestWrapper != null) {
+                logger.debug("Using REST wrapper from plugin " + plugin.getClass().getName());
+                if (restWrapper != null) {
+                    throw new IllegalArgumentException("Cannot have more than one plugin implementing a REST wrapper");
+                }
+                restWrapper = newRestWrapper;
+            }
+        }
+        UnaryOperator<ProtobufRestHandler> protobufRestWrapper = null;
+        for (ProtobufActionPlugin plugin : protobufActionPlugins) {
+            UnaryOperator<ProtobufRestHandler> newRestWrapper = plugin.getRestHandlerWrapper(threadPool.getThreadContext());
+            if (newRestWrapper != null) {
+                logger.debug("Using REST wrapper from plugin " + plugin.getClass().getName());
+                if (protobufRestWrapper != null) {
+                    throw new IllegalArgumentException("Cannot have more than one plugin implementing a REST wrapper");
+                }
+                protobufRestWrapper = newRestWrapper;
+            }
+        }
+        mappingRequestValidators = new RequestValidators<>(
+            actionPlugins.stream().flatMap(p -> p.mappingRequestValidators().stream()).collect(Collectors.toList())
+        );
+        indicesAliasesRequestRequestValidators = new RequestValidators<>(
+            actionPlugins.stream().flatMap(p -> p.indicesAliasesRequestValidators().stream()).collect(Collectors.toList())
+        );
+
+        restController = new RestController(headers, restWrapper, nodeClient, protobufRestWrapper, protobufNodeClient, circuitBreakerService, usageService);
+    }
+
     public Map<String, ActionHandler<?, ?>> getActions() {
         return actions;
     }
@@ -765,12 +862,47 @@ public void reg
         return unmodifiableMap(actions.getRegistry());
     }
 
+    static Map<String, ProtobufActionPlugin.ActionHandler<?, ?>> setupProtobufActions(List<ProtobufActionPlugin> actionPlugins) {
+        // Subclass NamedRegistry for easy registration
+        class ActionRegistry extends NamedRegistry<ProtobufActionPlugin.ActionHandler<?, ?>> {
+            ActionRegistry() {
+                super("action");
+            }
+
+            public void register(ProtobufActionPlugin.ActionHandler<?, ?> handler) {
+                register(handler.getAction().name(), handler);
+            }
+
+            public <Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> void register(
+                ProtobufActionType<Response> action,
+                Class<? extends ProtobufTransportAction<Request, Response>> transportAction,
+                Class<?>...
supportTransportActions + ) { + register(new ProtobufActionPlugin.ActionHandler<>(action, transportAction, supportTransportActions)); + } + } + ActionRegistry actions = new ActionRegistry(); + + actions.register(ProtobufMainAction.INSTANCE, ProtobufTransportMainAction.class); + actions.register(ProtobufNodesInfoAction.INSTANCE, ProtobufTransportNodesInfoAction.class); + actions.register(ProtobufNodesStatsAction.INSTANCE, ProtobufTransportNodesStatsAction.class); + actions.register(ProtobufClusterStateAction.INSTANCE, ProtobufTransportClusterStateAction.class); + + return unmodifiableMap(actions.getRegistry()); + } + private ActionFilters setupActionFilters(List actionPlugins) { return new ActionFilters( Collections.unmodifiableSet(actionPlugins.stream().flatMap(p -> p.getActionFilters().stream()).collect(Collectors.toSet())) ); } + private ProtobufActionFilters setupProtobufActionFilters(List actionPlugins) { + return new ProtobufActionFilters( + Collections.unmodifiableSet(actionPlugins.stream().flatMap(p -> p.getActionFilters().stream()).collect(Collectors.toSet())) + ); + } + public void initRestHandlers(Supplier nodesInCluster) { List catActions = new ArrayList<>(); Consumer registerHandler = handler -> { @@ -980,9 +1112,38 @@ public void initRestHandlers(Supplier nodesInCluster) { } } + public void initProtobufRestHandlers() { + List catActions = new ArrayList<>(); + Consumer registerHandler = handler -> { + if (handler instanceof ProtobufAbstractCatAction) { + catActions.add((ProtobufAbstractCatAction) handler); + } + restController.registerProtobufHandler(handler); + }; + + // CAT API + registerHandler.accept(new ProtobufRestNodesAction()); + + // for (ActionPlugin plugin : actionPlugins) { + // for (ProtobufActionPlugin handler : plugin.getRestHandlers( + // settings, + // restController, + // clusterSettings, + // indexScopedSettings, + // settingsFilter, + // indexNameExpressionResolver, + // nodesInCluster + // )) { + // registerHandler.accept(handler); + // } + // } + registerHandler.accept(new ProtobufRestCatAction(catActions)); + } + @Override protected void configure() { bind(ActionFilters.class).toInstance(actionFilters); + bind(ProtobufActionFilters.class).toInstance(protobufActionFilters); bind(DestructiveOperations.class).toInstance(destructiveOperations); bind(new TypeLiteral>() { }).toInstance(mappingRequestValidators); @@ -1011,6 +1172,27 @@ protected void configure() { // register dynamic ActionType -> transportAction Map used by NodeClient bind(DynamicActionRegistry.class).toInstance(dynamicActionRegistry); + + // register ActionType -> transportAction Map used by NodeClient + @SuppressWarnings("rawtypes") + MapBinder protobufTransportActionsBinder = MapBinder.newMapBinder( + binder(), + ProtobufActionType.class, + ProtobufTransportAction.class + ); + for (ProtobufActionPlugin.ActionHandler action : protobufActions.values()) { + // bind the action as eager singleton, so the map binder one will reuse it + bind(action.getTransportAction()).asEagerSingleton(); + protobufTransportActionsBinder.addBinding(action.getAction()).to(action.getTransportAction()).asEagerSingleton(); + for (Class supportAction : action.getSupportTransportActions()) { + bind(supportAction).asEagerSingleton(); + } + } + + // register dynamic ActionType -> transportAction Map used by NodeClient + bind(ProtobufDynamicActionRegistry.class).toInstance(protobufDynamicActionRegistry); + + } public ActionFilters getActionFilters() { @@ -1021,6 +1203,10 @@ public DynamicActionRegistry 
getDynamicActionRegistry() { return dynamicActionRegistry; } + public ProtobufDynamicActionRegistry getProtobufDynamicActionRegistry() { + return protobufDynamicActionRegistry; + } + public RestController getRestController() { return restController; } diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java index 4bae1f6c3bb04..ffb013af87251 100644 --- a/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java +++ b/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java @@ -10,7 +10,7 @@ import com.google.protobuf.CodedInputStream; import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ProtobufTransportResponseHandler; import org.opensearch.transport.ProtobufTransportException; import org.opensearch.transport.ProtobufTransportResponse; @@ -43,7 +43,7 @@ public ProtobufActionListenerResponseHandler( } public ProtobufActionListenerResponseHandler(ActionListener listener, ProtobufWriteable.Reader reader) { - this(listener, reader, ProtobufThreadPool.Names.SAME); + this(listener, reader, ThreadPool.Names.SAME); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java new file mode 100644 index 0000000000000..223520853fafe --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java @@ -0,0 +1,128 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.info; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.ProtobufFailedNodeException; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.nodes.ProtobufTransportNodesAction; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.node.ProtobufNodeService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportRequest; +import org.opensearch.transport.ProtobufTransportService; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +/** + * Transport action for OpenSearch Node Information +* +* @opensearch.internal +*/ +public class ProtobufTransportNodesInfoAction extends ProtobufTransportNodesAction< + ProtobufNodesInfoRequest, + ProtobufNodesInfoResponse, + ProtobufTransportNodesInfoAction.NodeInfoRequest, + ProtobufNodeInfo> { + + private final ProtobufNodeService nodeService; + + @Inject + public ProtobufTransportNodesInfoAction( + ThreadPool threadPool, + ClusterService clusterService, + ProtobufTransportService transportService, + ProtobufNodeService nodeService, + ProtobufActionFilters actionFilters + ) { + super( + NodesInfoAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + ProtobufNodesInfoRequest::new, + NodeInfoRequest::new, + ThreadPool.Names.MANAGEMENT, + ProtobufNodeInfo.class + ); + this.nodeService = nodeService; + } + + @Override + protected ProtobufNodesInfoResponse newResponse( + ProtobufNodesInfoRequest nodesInfoRequest, + List responses, + List failures + ) { + return new ProtobufNodesInfoResponse(new ProtobufClusterName(clusterService.getClusterName().value()), responses, failures); + } + + @Override + protected NodeInfoRequest newNodeRequest(ProtobufNodesInfoRequest request) { + return new NodeInfoRequest(request); + } + + @Override + protected ProtobufNodeInfo newNodeResponse(CodedInputStream in) throws IOException { + return new ProtobufNodeInfo(in); + } + + @Override + protected ProtobufNodeInfo nodeOperation(NodeInfoRequest nodeRequest) { + ProtobufNodesInfoRequest request = nodeRequest.request; + Set metrics = request.requestedMetrics(); + return nodeService.info( + metrics.contains(NodesInfoRequest.Metric.SETTINGS.metricName()), + metrics.contains(NodesInfoRequest.Metric.OS.metricName()), + metrics.contains(NodesInfoRequest.Metric.PROCESS.metricName()), + metrics.contains(NodesInfoRequest.Metric.JVM.metricName()), + metrics.contains(NodesInfoRequest.Metric.THREAD_POOL.metricName()), + metrics.contains(NodesInfoRequest.Metric.TRANSPORT.metricName()), + metrics.contains(NodesInfoRequest.Metric.HTTP.metricName()), + metrics.contains(NodesInfoRequest.Metric.PLUGINS.metricName()), + metrics.contains(NodesInfoRequest.Metric.INGEST.metricName()), + metrics.contains(NodesInfoRequest.Metric.AGGREGATIONS.metricName()), + metrics.contains(NodesInfoRequest.Metric.INDICES.metricName()), + metrics.contains(NodesInfoRequest.Metric.SEARCH_PIPELINES.metricName()) + ); + } + + /** + * Inner Node Info Request + * + * @opensearch.internal + */ + public static class NodeInfoRequest extends ProtobufTransportRequest { + + ProtobufNodesInfoRequest request; + + public 
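+        // deserializing constructor: rebuilds the wrapped ProtobufNodesInfoRequest from the stream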
NodeInfoRequest(CodedInputStream in) throws IOException { + super(in); + request = new ProtobufNodesInfoRequest(in); + } + + public NodeInfoRequest(ProtobufNodesInfoRequest request) { + this.request = request; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java new file mode 100644 index 0000000000000..b12bd7ac0a4d1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java @@ -0,0 +1,132 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.node.stats; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.action.ProtobufFailedNodeException; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.nodes.ProtobufTransportNodesAction; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.node.ProtobufNodeService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportRequest; +import org.opensearch.transport.ProtobufTransportService; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +/** + * Transport action for obtaining OpenSearch Node Stats +* +* @opensearch.internal +*/ +public class ProtobufTransportNodesStatsAction extends ProtobufTransportNodesAction< + ProtobufNodesStatsRequest, + ProtobufNodesStatsResponse, + ProtobufTransportNodesStatsAction.NodeStatsRequest, + ProtobufNodeStats> { + + private final ProtobufNodeService nodeService; + + @Inject + public ProtobufTransportNodesStatsAction( + ThreadPool threadPool, + ClusterService clusterService, + ProtobufTransportService transportService, + ProtobufNodeService nodeService, + ProtobufActionFilters actionFilters + ) { + super( + NodesStatsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + ProtobufNodesStatsRequest::new, + NodeStatsRequest::new, + ThreadPool.Names.MANAGEMENT, + ProtobufNodeStats.class + ); + this.nodeService = nodeService; + } + + @Override + protected ProtobufNodesStatsResponse newResponse(ProtobufNodesStatsRequest request, List responses, List failures) { + return new ProtobufNodesStatsResponse(new ProtobufClusterName(clusterService.getClusterName().value()), responses, failures); + } + + @Override + protected NodeStatsRequest newNodeRequest(ProtobufNodesStatsRequest request) { + return new NodeStatsRequest(request); + } + + @Override + protected ProtobufNodeStats newNodeResponse(CodedInputStream in) throws IOException { + return new ProtobufNodeStats(in); + } + + @Override + protected ProtobufNodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { + ProtobufNodesStatsRequest request = nodeStatsRequest.request; + Set metrics = request.requestedMetrics(); + return nodeService.stats( + request.indices(), + 
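+            // each flag below switches one section of the per-node stats payload on or off,
+            // driven by the metric names carried in the request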
ProtobufNodesStatsRequest.Metric.OS.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.PROCESS.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.JVM.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.THREAD_POOL.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.FS.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.TRANSPORT.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.HTTP.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.BREAKER.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.SCRIPT.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.DISCOVERY.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.INGEST.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.ADAPTIVE_SELECTION.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.SCRIPT_CACHE.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.INDEXING_PRESSURE.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.SHARD_INDEXING_PRESSURE.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.SEARCH_BACKPRESSURE.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.CLUSTER_MANAGER_THROTTLING.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), + ProtobufNodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics) + ); + } + + /** + * Inner Node Stats Request + * + * @opensearch.internal + */ + public static class NodeStatsRequest extends ProtobufTransportRequest { + + ProtobufNodesStatsRequest request; + + public NodeStatsRequest(CodedInputStream in) throws IOException { + super(in); + request = new ProtobufNodesStatsRequest(in); + } + + NodeStatsRequest(ProtobufNodesStatsRequest request) { + this.request = request; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java new file mode 100644 index 0000000000000..b8a663c409904 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java @@ -0,0 +1,212 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.state; + +import com.google.protobuf.CodedInputStream; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.clustermanager.ProtobufTransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ProtobufClusterState; +import org.opensearch.cluster.ProtobufClusterStateObserver; +import org.opensearch.cluster.NotClusterManagerException; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.ProtobufIndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.Custom; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.NodeClosedException; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportService; + +import java.io.IOException; +import java.util.function.Predicate; +import java.util.Map; + +/** + * Transport action for obtaining cluster state + * + * @opensearch.internal + */ +public class ProtobufTransportClusterStateAction extends ProtobufTransportClusterManagerNodeReadAction { + + private final Logger logger = LogManager.getLogger(getClass()); + + static { + final String property = System.getProperty("opensearch.cluster_state.size"); + if (property != null) { + throw new IllegalArgumentException("opensearch.cluster_state.size is no longer respected but was [" + property + "]"); + } + } + + @Inject + public ProtobufTransportClusterStateAction( + ProtobufTransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ProtobufActionFilters actionFilters, + ProtobufIndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + ClusterStateAction.NAME, + false, + transportService, + clusterService, + threadPool, + actionFilters, + ProtobufClusterStateRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + // very lightweight operation in memory, no need to fork to a thread + return ThreadPool.Names.SAME; + } + + @Override + protected ProtobufClusterStateResponse read(CodedInputStream in) throws IOException { + return new ProtobufClusterStateResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(ProtobufClusterStateRequest request, ProtobufClusterState state) { + // cluster state calls are done also on a fully blocked cluster to figure out what is going + // on in the cluster. For example, which nodes have joined yet the recovery has not yet kicked + // in, we need to make sure we allow those calls + // return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return null; + } + + @Override + protected void clusterManagerOperation( + final ProtobufClusterStateRequest request, + final ProtobufClusterState state, + final ActionListener listener + ) throws IOException { + + final Predicate acceptableClusterStatePredicate = request.waitForMetadataVersion() == null + ? 
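+            // no minimum metadata version was requested, so any published state qualifies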
clusterState -> true + : clusterState -> clusterState.metadata().version() >= request.waitForMetadataVersion(); + + final Predicate acceptableClusterStateOrNotMasterPredicate = request.local() + ? acceptableClusterStatePredicate + : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedClusterManager() == false); + + if (acceptableClusterStatePredicate.test(state)) { + ActionListener.completeWith(listener, () -> buildResponse(request, state)); + } else { + assert acceptableClusterStateOrNotMasterPredicate.test(state) == false; + new ProtobufClusterStateObserver(state, clusterService, request.waitForTimeout(), logger, threadPool.getThreadContext()) + .waitForNextChange(new ProtobufClusterStateObserver.Listener() { + + @Override + public void onNewClusterState(ProtobufClusterState newState) { + if (acceptableClusterStatePredicate.test(newState)) { + ActionListener.completeWith(listener, () -> buildResponse(request, newState)); + } else { + listener.onFailure( + new NotClusterManagerException( + "cluster-manager stepped down waiting for metadata version " + request.waitForMetadataVersion() + ) + ); + } + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + try { + listener.onResponse(new ProtobufClusterStateResponse(state.getClusterName(), null, true)); + } catch (Exception e) { + listener.onFailure(e); + } + } + }, acceptableClusterStateOrNotMasterPredicate); + } + } + + private ProtobufClusterStateResponse buildResponse(final ProtobufClusterStateRequest request, final ProtobufClusterState currentState) { + logger.trace("Serving cluster state request using version {}", currentState.version()); + ProtobufClusterState.Builder builder = ProtobufClusterState.builder(currentState.getClusterName()); + builder.version(currentState.version()); + builder.stateUUID(currentState.stateUUID()); + + if (request.nodes()) { + builder.nodes(currentState.nodes()); + } + if (request.routingTable()) { + if (request.indices().length > 0) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request); + for (String filteredIndex : indices) { + if (currentState.routingTable().getIndicesRouting().containsKey(filteredIndex)) { + routingTableBuilder.add(currentState.routingTable().getIndicesRouting().get(filteredIndex)); + } + } + builder.routingTable(routingTableBuilder.build()); + } else { + builder.routingTable(currentState.routingTable()); + } + } + if (request.blocks()) { + builder.blocks(currentState.blocks()); + } + + Metadata.Builder mdBuilder = Metadata.builder(); + mdBuilder.clusterUUID(currentState.metadata().clusterUUID()); + mdBuilder.coordinationMetadata(currentState.coordinationMetadata()); + + if (request.metadata()) { + if (request.indices().length > 0) { + mdBuilder.version(currentState.metadata().version()); + String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request); + for (String filteredIndex : indices) { + IndexMetadata indexMetadata = currentState.metadata().index(filteredIndex); + if (indexMetadata != null) { + mdBuilder.put(indexMetadata, false); + } + } + } else { + mdBuilder = Metadata.builder(currentState.metadata()); + } + + // filter out metadata that shouldn't be returned by the API + for (final Map.Entry custom : currentState.metadata().customs().entrySet()) { + if 
(custom.getValue().context().contains(Metadata.XContentContext.API) == false) { + mdBuilder.removeCustom(custom.getKey()); + } + } + } + builder.metadata(mdBuilder); + + if (request.customs()) { + for (ObjectObjectCursor custom : currentState.customs()) { + if (custom.value.isPrivate() == false) { + builder.putCustom(custom.key, custom.value); + } + } + } + + return new ProtobufClusterStateResponse(currentState.getClusterName(), builder.build(), false); + } + +} diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java new file mode 100644 index 0000000000000..1a457423f0b00 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java @@ -0,0 +1,26 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.main; + +import org.opensearch.action.ProtobufActionType; + +/** + * The main OpenSearch Action +* +* @opensearch.internal +*/ +public class ProtobufMainAction extends ProtobufActionType { + + public static final String NAME = "cluster:monitor/main"; + public static final ProtobufMainAction INSTANCE = new ProtobufMainAction(); + + public ProtobufMainAction() { + super(NAME, ProtobufMainResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java new file mode 100644 index 0000000000000..3fb3e35d679f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java @@ -0,0 +1,35 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.main; + +import com.google.protobuf.CodedInputStream; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Transport request for main action +* +* @opensearch.internal +*/ +public class ProtobufMainRequest extends ProtobufActionRequest { + + public ProtobufMainRequest() {} + + ProtobufMainRequest(CodedInputStream in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java new file mode 100644 index 0000000000000..36963fd5894f5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java @@ -0,0 +1,185 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.main; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.core.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ObjectParser; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * The main response of opensearch +* +* @opensearch.internal +*/ +public class ProtobufMainResponse extends ProtobufActionResponse implements ToXContentObject { + + private String nodeName; + private Version version; + private ProtobufClusterName clusterName; + private String clusterUuid; + private Build build; + public static final String TAGLINE = "The OpenSearch Project: https://opensearch.org/"; + + ProtobufMainResponse() {} + + ProtobufMainResponse(CodedInputStream in) throws IOException { + super(in); + nodeName = in.readString(); + version = Version.readVersionProtobuf(in); + clusterName = new ProtobufClusterName(in); + clusterUuid = in.readString(); + build = Build.readBuildProtobuf(in); + } + + public ProtobufMainResponse(String nodeName, Version version, ProtobufClusterName clusterName, String clusterUuid, Build build) { + this.nodeName = nodeName; + this.version = version; + this.clusterName = clusterName; + this.clusterUuid = clusterUuid; + this.build = build; + } + + public String getNodeName() { + return nodeName; + } + + public Version getVersion() { + return version; + } + + public ProtobufClusterName getClusterName() { + return clusterName; + } + + public String getClusterUuid() { + return clusterUuid; + } + + public Build getBuild() { + return build; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeStringNoTag(nodeName); + out.writeInt32NoTag(version.id); + clusterName.writeTo(out); + out.writeStringNoTag(clusterUuid); + Build.writeBuildProtobuf(build, out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", nodeName); + builder.field("cluster_name", clusterName.value()); + builder.field("cluster_uuid", clusterUuid); + builder.startObject("version") + .field("distribution", build.getDistribution()) + .field("number", build.getQualifiedVersion()) + .field("build_type", build.type().displayName()) + .field("build_hash", build.hash()) + .field("build_date", build.date()) + .field("build_snapshot", build.isSnapshot()) + .field("lucene_version", version.luceneVersion.toString()) + .field("minimum_wire_compatibility_version", version.minimumCompatibilityVersion().toString()) + .field("minimum_index_compatibility_version", version.minimumIndexCompatibilityVersion().toString()) + .endObject(); + builder.field("tagline", TAGLINE); + builder.endObject(); + return builder; + } + + private static final ObjectParser PARSER = new ObjectParser<>( + ProtobufMainResponse.class.getName(), + true, + ProtobufMainResponse::new + ); + + static { + PARSER.declareString((response, value) -> response.nodeName = value, new ParseField("name")); + PARSER.declareString((response, value) -> response.clusterName = new 
ProtobufClusterName(value), new ParseField("cluster_name")); + PARSER.declareString((response, value) -> response.clusterUuid = value, new ParseField("cluster_uuid")); + PARSER.declareString((response, value) -> {}, new ParseField("tagline")); + PARSER.declareObject((response, value) -> { + final String buildType = (String) value.get("build_type"); + response.build = new Build( + /* + * Be lenient when reading on the wire, the enumeration values from other versions might be different than what + * we know. + */ + buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType, false), + (String) value.get("build_hash"), + (String) value.get("build_date"), + (boolean) value.get("build_snapshot"), + (String) value.get("number"), + (String) value.get("distribution") + ); + response.version = Version.fromString( + ((String) value.get("number")).replace("-SNAPSHOT", "").replaceFirst("-(alpha\\d+|beta\\d+|rc\\d+)", "") + ); + }, (parser, context) -> parser.map(), new ParseField("version")); + } + + public static ProtobufMainResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ProtobufMainResponse other = (ProtobufMainResponse) o; + return Objects.equals(nodeName, other.nodeName) + && Objects.equals(version, other.version) + && Objects.equals(clusterUuid, other.clusterUuid) + && Objects.equals(build, other.build) + && Objects.equals(clusterName, other.clusterName); + } + + @Override + public int hashCode() { + return Objects.hash(nodeName, version, clusterUuid, build, clusterName); + } + + @Override + public String toString() { + return "ProtobufMainResponse{" + + "nodeName='" + + nodeName + + '\'' + + ", version=" + + version + + ", clusterName=" + + clusterName + + ", clusterUuid='" + + clusterUuid + + '\'' + + ", build=" + + build + + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java b/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java new file mode 100644 index 0000000000000..d5f91651d2fb0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java @@ -0,0 +1,56 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.main; + +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.ProtobufHandledTransportAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.ProtobufClusterState; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.Node; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.transport.ProtobufTransportService; + +/** + * Performs the main action +* +* @opensearch.internal +*/ +public class ProtobufTransportMainAction extends ProtobufHandledTransportAction { + + private final String nodeName; + private final ClusterService clusterService; + + @Inject + public ProtobufTransportMainAction( + Settings settings, + ProtobufTransportService transportService, + ProtobufActionFilters actionFilters, + ClusterService clusterService + ) { + super(MainAction.NAME, transportService, actionFilters, ProtobufMainRequest::new); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); + this.clusterService = clusterService; + } + + @Override + protected void doExecute(ProtobufTask task, ProtobufMainRequest request, ActionListener listener) { + ClusterState clusterState = clusterService.state(); + ProtobufClusterName clusterName = new ProtobufClusterName(clusterState.getClusterName().value()); + listener.onResponse( + new ProtobufMainResponse(nodeName, Version.CURRENT, clusterName, clusterState.metadata().clusterUUID(), Build.CURRENT) + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java b/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java new file mode 100644 index 0000000000000..5d85e62ea9def --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java @@ -0,0 +1,83 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.support; + +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportChannel; +import org.opensearch.transport.ProtobufTransportRequestHandler; +import org.opensearch.transport.ProtobufTransportService; + +/** + * A ProtobufTransportAction that self registers a handler into the transport service +* +* @opensearch.internal +*/ +public abstract class ProtobufHandledTransportAction extends ProtobufTransportAction< + Request, + Response> { + + protected ProtobufHandledTransportAction( + String actionName, + ProtobufTransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader + ) { + this(actionName, true, transportService, actionFilters, requestReader); + } + + protected ProtobufHandledTransportAction( + String actionName, + ProtobufTransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader, + String executor + ) { + this(actionName, true, transportService, actionFilters, requestReader, executor); + } + + protected ProtobufHandledTransportAction( + String actionName, + boolean canTripCircuitBreaker, + ProtobufTransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader + ) { + this(actionName, canTripCircuitBreaker, transportService, actionFilters, requestReader, ThreadPool.Names.SAME); + } + + protected ProtobufHandledTransportAction( + String actionName, + boolean canTripCircuitBreaker, + ProtobufTransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader, + String executor + ) { + super(actionName, actionFilters, transportService.getTaskManager()); + transportService.registerRequestHandler(actionName, executor, false, canTripCircuitBreaker, requestReader, new TransportHandler()); + } + + /** + * Inner transport handler + * + * @opensearch.internal + */ + class TransportHandler implements ProtobufTransportRequestHandler { + @Override + public final void messageReceived(final Request request, final ProtobufTransportChannel channel, ProtobufTask task) { + // We already got the task created on the network layer - no need to create it again on the transport layer + execute(task, request, new ProtobufChannelActionListener<>(channel, actionName, request)); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java b/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java index 72e4ac90403b0..6f0356fb0dabc 100644 --- a/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java @@ -19,8 +19,8 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.ProtobufTask; import org.opensearch.tasks.ProtobufTaskId; -import org.opensearch.tasks.ProtobufTaskManager; import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.ProtobufTaskListener; import java.util.concurrent.atomic.AtomicInteger; @@ -33,14 +33,14 @@ public abstract class ProtobufTransportAction {}; } @@ -71,19 +71,19 @@ public final ProtobufTask execute(Request request, 
ActionListener list final ProtobufTask task; try { - task = taskManager.register("transport", actionName, request); + task = taskManager.registerProtobuf("transport", actionName, request); } catch (TaskCancelledException e) { unregisterChildNode.close(); throw e; } - ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); + ThreadContext.StoredContext storedContext = taskManager.protobufTaskExecutionStarted(task); try { execute(task, request, new ActionListener() { @Override public void onResponse(Response response) { try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task)); } finally { listener.onResponse(response); } @@ -92,7 +92,7 @@ public void onResponse(Response response) { @Override public void onFailure(Exception e) { try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task)); } finally { listener.onFailure(e); } @@ -113,18 +113,18 @@ public final ProtobufTask execute(Request request, ProtobufTaskListener() { @Override public void onResponse(Response response) { try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task)); } finally { listener.onResponse(task, response); } @@ -133,7 +133,7 @@ public void onResponse(Response response) { @Override public void onFailure(Exception e) { try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task)); } finally { listener.onFailure(task, e); } @@ -210,9 +210,9 @@ public void proceed(ProtobufTask task, String actionName, Request request, Actio private static class TaskResultStoringActionListener implements ActionListener { private final ActionListener delegate; private final ProtobufTask task; - private final ProtobufTaskManager taskManager; + private final TaskManager taskManager; - private TaskResultStoringActionListener(ProtobufTaskManager taskManager, ProtobufTask task, ActionListener delegate) { + private TaskResultStoringActionListener(TaskManager taskManager, ProtobufTask task, ActionListener delegate) { this.taskManager = taskManager; this.task = task; this.delegate = delegate; @@ -221,7 +221,7 @@ private TaskResultStoringActionListener(ProtobufTaskManager taskManager, Protobu @Override public void onResponse(Response response) { try { - taskManager.storeResult(task, response, delegate); + taskManager.storeResultProtobuf(task, response, delegate); } catch (Exception e) { delegate.onFailure(e); } @@ -230,7 +230,7 @@ public void onResponse(Response response) { @Override public void onFailure(Exception e) { try { - taskManager.storeResult(task, e, delegate); + taskManager.storeResultProtobuf(task, e, delegate); } catch (Exception inner) { inner.addSuppressed(e); delegate.onFailure(inner); diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java new file mode 100644 index 0000000000000..5b2e3b4d927d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java @@ -0,0 +1,352 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.support.clustermanager;
+
+import com.google.protobuf.CodedInputStream;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ProtobufActionListenerResponseHandler;
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.action.ActionRunnable;
+import org.opensearch.action.bulk.BackoffPolicy;
+import org.opensearch.action.support.ProtobufActionFilters;
+import org.opensearch.action.support.ProtobufHandledTransportAction;
+import org.opensearch.action.support.RetryableAction;
+import org.opensearch.cluster.ProtobufClusterState;
+import org.opensearch.cluster.ProtobufClusterStateObserver;
+import org.opensearch.cluster.ClusterManagerNodeChangePredicate;
+import org.opensearch.cluster.NotClusterManagerException;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.coordination.FailedToCommitClusterStateException;
+import org.opensearch.cluster.metadata.ProtobufIndexNameExpressionResolver;
+import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException;
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
+import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
+import org.opensearch.cluster.service.ClusterManagerThrottlingException;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.discovery.ClusterManagerNotDiscoveredException;
+import org.opensearch.node.NodeClosedException;
+import org.opensearch.tasks.ProtobufTask;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.ConnectTransportException;
+import org.opensearch.transport.ProtobufRemoteTransportException;
+import org.opensearch.transport.ProtobufTransportException;
+import org.opensearch.transport.ProtobufTransportService;
+
+import java.io.IOException;
+import java.util.function.Predicate;
+
+/**
+ * A base class for operations that need to be performed on the cluster-manager node.
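+ *
+ * <p>A rough sketch of how a concrete action might extend this class. The Example* names below
+ * are illustrative only and not part of this change:
+ * <pre>
+ * public class ProtobufTransportExampleAction extends ProtobufTransportClusterManagerNodeAction<ExampleRequest, ExampleResponse> {
+ *
+ *     @Override
+ *     protected String executor() {
+ *         return ThreadPool.Names.SAME;
+ *     }
+ *
+ *     @Override
+ *     protected ExampleResponse read(CodedInputStream in) throws IOException {
+ *         return new ExampleResponse(in);
+ *     }
+ *
+ *     @Override
+ *     protected void clusterManagerOperation(ExampleRequest request, ProtobufClusterState state, ActionListener<ExampleResponse> listener) {
+ *         listener.onResponse(new ExampleResponse(state));
+ *     }
+ *
+ *     @Override
+ *     protected ClusterBlockException checkBlock(ExampleRequest request, ProtobufClusterState state) {
+ *         return null; // this sketch checks no cluster blocks
+ *     }
+ * }
+ * </pre>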
+ *
+ * @opensearch.internal
+ */
+public abstract class ProtobufTransportClusterManagerNodeAction<Request extends ProtobufClusterManagerNodeRequest<Request>, Response extends ProtobufActionResponse>
+    extends ProtobufHandledTransportAction<Request, Response> {
+
+    private static final Logger logger = LogManager.getLogger(ProtobufTransportClusterManagerNodeAction.class);
+
+    protected final ThreadPool threadPool;
+    protected final ProtobufTransportService transportService;
+    protected final ClusterService clusterService;
+    protected final ProtobufIndexNameExpressionResolver indexNameExpressionResolver;
+
+    private final String executor;
+
+    protected ProtobufTransportClusterManagerNodeAction(
+        String actionName,
+        ProtobufTransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ProtobufActionFilters actionFilters,
+        ProtobufWriteable.Reader<Request> request,
+        ProtobufIndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver);
+    }
+
+    protected ProtobufTransportClusterManagerNodeAction(
+        String actionName,
+        boolean canTripCircuitBreaker,
+        ProtobufTransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ProtobufActionFilters actionFilters,
+        ProtobufWriteable.Reader<Request> request,
+        ProtobufIndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(actionName, canTripCircuitBreaker, transportService, actionFilters, request);
+        this.transportService = transportService;
+        this.clusterService = clusterService;
+        this.threadPool = threadPool;
+        this.indexNameExpressionResolver = indexNameExpressionResolver;
+        this.executor = executor();
+    }
+
+    protected abstract String executor();
+
+    protected abstract Response read(CodedInputStream in) throws IOException;
+
+    /**
+     * @deprecated As of 2.2, replaced by {@link #clusterManagerOperation(ProtobufClusterManagerNodeRequest, ProtobufClusterState, ActionListener)} in support of inclusive language
+     */
+    @Deprecated
+    protected void masterOperation(Request request, ProtobufClusterState state, ActionListener<Response> listener) throws Exception {
+        throw new UnsupportedOperationException("Must be overridden");
+    }
+
+    // TODO: Add abstract keyword after removing the deprecated masterOperation()
+    protected void clusterManagerOperation(Request request, ProtobufClusterState state, ActionListener<Response> listener) throws Exception {
+        masterOperation(request, state, listener);
+    }
+
+    /**
+     * Override this operation if access to the task parameter is needed
+     * @deprecated As of 2.2, replaced by {@link #clusterManagerOperation(ProtobufTask, ProtobufClusterManagerNodeRequest, ProtobufClusterState, ActionListener)} in support of inclusive language
+     */
+    @Deprecated
+    protected void masterOperation(ProtobufTask task, Request request, ProtobufClusterState state, ActionListener<Response> listener) throws Exception {
+        clusterManagerOperation(request, state, listener);
+    }
+
+    /**
+     * Override this operation if access to the task parameter is needed
+     */
+    // TODO: Change the implementation to call 'clusterManagerOperation(request...)' after removing the deprecated masterOperation()
+    protected void clusterManagerOperation(ProtobufTask task, Request request, ProtobufClusterState state, ActionListener<Response> listener)
+        throws Exception {
+        masterOperation(task, request, state, listener);
+    }
+
+    protected boolean localExecute(Request request) {
+        return false;
+    }
+
+    protected abstract ClusterBlockException checkBlock(Request request, ProtobufClusterState state);
+
+    @Override
+    protected void doExecute(ProtobufTask task, final Request request, ActionListener<Response> listener) {
+        if (task != null) {
+            request.setParentTask(clusterService.localNode().getId(), task.getId());
+        }
+        new AsyncSingleAction(task, request, listener).run();
+    }
+
+    /**
+     * Asynchronous single action
+     *
+     * @opensearch.internal
+     */
+    class AsyncSingleAction extends RetryableAction<Response> {
+
+        private ActionListener<Response> listener;
+        private final Request request;
+        private ProtobufClusterStateObserver observer;
+        private final long startTime;
+        private final ProtobufTask task;
+
+        AsyncSingleAction(ProtobufTask task, Request request, ActionListener<Response> listener) {
+            super(
+                logger,
+                threadPool,
+                ClusterManagerTaskThrottler.getBaseDelayForRetry(),
+                request.clusterManagerNodeTimeout,
+                listener,
+                BackoffPolicy.exponentialEqualJitterBackoff(
+                    ClusterManagerTaskThrottler.getBaseDelayForRetry().millis(),
+                    ClusterManagerTaskThrottler.getMaxDelayForRetry().millis()
+                ),
+                ThreadPool.Names.SAME
+            );
+            this.task = task;
+            this.request = request;
+            this.startTime = threadPool.relativeTimeInMillis();
+        }
+
+        @Override
+        public void tryAction(ActionListener<Response> retryListener) {
+            ProtobufClusterState state = clusterService.protobufState();
+            logger.trace("starting processing request [{}] with cluster state version [{}]", request, state.version());
+            this.listener = retryListener;
+            doStart(state);
+        }
+
+        @Override
+        public boolean shouldRetry(Exception e) {
+            // If the remote address is null, the request originated on this node, so the throttling retry should happen here.
+            // If the remote address is not null, the request originated on a remote node and was received over the transport
+            // layer; in that case the throttling retry should happen on the remote node only, not on this cluster-manager node.
+            if (request.remoteAddress() == null) {
+                if (e instanceof ProtobufTransportException) {
+                    return ((ProtobufTransportException) e).unwrapCause() instanceof ClusterManagerThrottlingException;
+                }
+                return e instanceof ClusterManagerThrottlingException;
+            }
+            return false;
+        }
+
+        /**
+         * If a task times out while retrying on throttling,
+         * it should fail with a cluster event timeout exception.
+ */ + @Override + public Exception getTimeoutException(Exception e) { + return new ProcessClusterEventTimeoutException(request.masterNodeTimeout, actionName); + } + + protected void doStart(ProtobufClusterState clusterState) { + try { + final ProtobufDiscoveryNodes nodes = clusterState.nodes(); + if (nodes.isLocalNodeElectedClusterManager() || localExecute(request)) { + // check for block, if blocked, retry, else, execute locally + final ClusterBlockException blockException = checkBlock(request, clusterState); + if (blockException != null) { + if (!blockException.retryable()) { + listener.onFailure(blockException); + } else { + logger.debug("can't execute due to a cluster block, retrying", blockException); + retry(clusterState, blockException, newState -> { + try { + ClusterBlockException newException = checkBlock(request, newState); + return (newException == null || !newException.retryable()); + } catch (Exception e) { + // accept state as block will be rechecked by doStart() and listener.onFailure() then called + logger.trace("exception occurred during cluster block checking, accepting state", e); + return true; + } + }); + } + } else { + ActionListener delegate = ActionListener.delegateResponse(listener, (delegatedListener, t) -> { + if (t instanceof FailedToCommitClusterStateException || t instanceof NotClusterManagerException) { + logger.debug( + () -> new ParameterizedMessage( + "master could not publish cluster state or " + + "stepped down before publishing action [{}], scheduling a retry", + actionName + ), + t + ); + retryOnMasterChange(clusterState, t); + } else { + delegatedListener.onFailure(t); + } + }); + threadPool.executor(executor) + .execute(ActionRunnable.wrap(delegate, l -> clusterManagerOperation(task, request, clusterState, l))); + } + } else { + if (nodes.getClusterManagerNode() == null) { + logger.debug("no known cluster-manager node, scheduling a retry"); + retryOnMasterChange(clusterState, null); + } else { + ProtobufDiscoveryNode clusterManagerNode = nodes.getClusterManagerNode(); + final String actionName = getClusterManagerActionName(clusterManagerNode); + transportService.sendRequest( + clusterManagerNode, + actionName, + request, + new ProtobufActionListenerResponseHandler(listener, ProtobufTransportClusterManagerNodeAction.this::read) { + @Override + public void handleException(final ProtobufTransportException exp) { + Throwable cause = exp.unwrapCause(); + if (cause instanceof ConnectTransportException + || (exp instanceof ProtobufRemoteTransportException && cause instanceof NodeClosedException)) { + // we want to retry here a bit to see if a new cluster-manager is elected + logger.debug( + "connection exception while trying to forward request with action name [{}] to " + + "master node [{}], scheduling a retry. 
Error: [{}]",
+                                            actionName,
+                                            nodes.getClusterManagerNode(),
+                                            exp.getDetailedMessage()
+                                        );
+                                        retryOnMasterChange(clusterState, cause);
+                                    } else {
+                                        listener.onFailure(exp);
+                                    }
+                                }
+                            }
+                        );
+                    }
+                }
+            } catch (Exception e) {
+                listener.onFailure(e);
+            }
+        }
+
+        private void retryOnMasterChange(ProtobufClusterState state, Throwable failure) {
+            retry(state, failure, ClusterManagerNodeChangePredicate.buildProtobuf(state));
+        }
+
+        private void retry(ProtobufClusterState state, final Throwable failure, final Predicate<ProtobufClusterState> statePredicate) {
+            if (observer == null) {
+                final long remainingTimeoutMS = request.clusterManagerNodeTimeout().millis() - (threadPool.relativeTimeInMillis()
+                    - startTime);
+                if (remainingTimeoutMS <= 0) {
+                    logger.debug(() -> new ParameterizedMessage("timed out before retrying [{}] after failure", actionName), failure);
+                    listener.onFailure(new ClusterManagerNotDiscoveredException(failure));
+                    return;
+                }
+                this.observer = new ProtobufClusterStateObserver(
+                    state,
+                    clusterService,
+                    TimeValue.timeValueMillis(remainingTimeoutMS),
+                    logger,
+                    threadPool.getThreadContext()
+                );
+            }
+            observer.waitForNextChange(new ProtobufClusterStateObserver.Listener() {
+                @Override
+                public void onNewClusterState(ProtobufClusterState state) {
+                    doStart(state);
+                }
+
+                @Override
+                public void onClusterServiceClose() {
+                    listener.onFailure(new NodeClosedException(clusterService.localNode()));
+                }
+
+                @Override
+                public void onTimeout(TimeValue timeout) {
+                    logger.debug(
+                        () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout),
+                        failure
+                    );
+                    listener.onFailure(new ClusterManagerNotDiscoveredException(failure));
+                }
+            }, statePredicate);
+        }
+    }
+
+    /**
+     * Allows conditionally returning a different cluster-manager node action name in the case an action gets renamed.
+     * This is mainly for backwards compatibility and should be used rarely.
+     */
+    protected String getClusterManagerActionName(ProtobufDiscoveryNode node) {
+        return actionName;
+    }
+
+    /**
+     * Allows conditionally returning a different cluster-manager node action name in the case an action gets renamed.
+     * This is mainly for backwards compatibility and should be used rarely.
+     *
+     * @deprecated As of 2.1, replaced by {@link #getClusterManagerActionName(ProtobufDiscoveryNode)} in support of inclusive language
+     */
+    @Deprecated
+    protected String getMasterActionName(ProtobufDiscoveryNode node) {
+        return getClusterManagerActionName(node);
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java
new file mode 100644
index 0000000000000..18475a6ce6283
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.support.clustermanager;
+
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.action.support.ProtobufActionFilters;
+import org.opensearch.cluster.metadata.ProtobufIndexNameExpressionResolver;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.ProtobufTransportService;
+
+/**
+ * A base class for read operations that need to be performed on the cluster-manager node.
+ * Can also be executed on the local node if needed.
+ *
+ * @opensearch.internal
+ */
+public abstract class ProtobufTransportClusterManagerNodeReadAction<
+    Request extends ProtobufClusterManagerNodeReadRequest<Request>,
+    Response extends ProtobufActionResponse> extends ProtobufTransportClusterManagerNodeAction<Request, Response> {
+
+    protected ProtobufTransportClusterManagerNodeReadAction(
+        String actionName,
+        ProtobufTransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ProtobufActionFilters actionFilters,
+        ProtobufWriteable.Reader<Request> request,
+        ProtobufIndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver);
+    }
+
+    protected ProtobufTransportClusterManagerNodeReadAction(
+        String actionName,
+        boolean checkSizeLimit,
+        ProtobufTransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ProtobufActionFilters actionFilters,
+        ProtobufWriteable.Reader<Request> request,
+        ProtobufIndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            actionName,
+            checkSizeLimit,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            request,
+            indexNameExpressionResolver
+        );
+    }
+
+    @Override
+    protected final boolean localExecute(Request request) {
+        return request.local();
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java
new file mode 100644
index 0000000000000..4b805811f49c2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java
@@ -0,0 +1,309 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.support.nodes;
+
+import com.google.protobuf.CodedInputStream;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ActionRunnable;
+import org.opensearch.action.ProtobufFailedNodeException;
+import org.opensearch.action.support.ProtobufActionFilters;
+import org.opensearch.action.support.ProtobufHandledTransportAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.tasks.ProtobufTask;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.NodeShouldNotConnectException;
+import org.opensearch.transport.ProtobufTransportChannel;
+import org.opensearch.transport.ProtobufTransportException;
+import org.opensearch.transport.ProtobufTransportRequest;
+import org.opensearch.transport.ProtobufTransportRequestHandler;
+import org.opensearch.transport.TransportRequestOptions;
+import org.opensearch.transport.ProtobufTransportResponseHandler;
+import org.opensearch.transport.ProtobufTransportService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+/**
+ * Base action class for transport nodes
+*
+* @opensearch.internal
+*/
+public abstract class ProtobufTransportNodesAction<
+    NodesRequest extends ProtobufBaseNodesRequest<NodesRequest>,
+    NodesResponse extends ProtobufBaseNodesResponse,
+    NodeRequest extends ProtobufTransportRequest,
+    NodeResponse extends ProtobufBaseNodeResponse> extends ProtobufHandledTransportAction<NodesRequest, NodesResponse> {
+
+    protected final ThreadPool threadPool;
+    protected final ClusterService clusterService;
+    protected final ProtobufTransportService transportService;
+    protected final Class<NodeResponse> nodeResponseClass;
+    protected final String transportNodeAction;
+
+    private final String finalExecutor;
+
+    /**
+     * @param actionName action name
+     * @param threadPool thread-pool
+     * @param clusterService cluster service
+     * @param transportService transport service
+     * @param actionFilters action filters
+     * @param request nodes request reader
+     * @param nodeRequest node request reader
+     * @param nodeExecutor executor to execute node action on
+     * @param finalExecutor executor to execute final collection of all responses on
+     * @param nodeResponseClass class of the node responses
+     */
+    protected ProtobufTransportNodesAction(
+        String actionName,
+        ThreadPool threadPool,
+        ClusterService clusterService,
+        ProtobufTransportService transportService,
+        ProtobufActionFilters actionFilters,
+        ProtobufWriteable.Reader<NodesRequest> request,
+        ProtobufWriteable.Reader<NodeRequest> nodeRequest,
+        String nodeExecutor,
+        String finalExecutor,
+        Class<NodeResponse> nodeResponseClass
+    ) {
+        super(actionName, transportService, actionFilters, request);
+        this.threadPool = threadPool;
+        this.clusterService = Objects.requireNonNull(clusterService);
+        this.transportService = Objects.requireNonNull(transportService);
+        this.nodeResponseClass = Objects.requireNonNull(nodeResponseClass);
+
+        this.transportNodeAction = actionName + "[n]";
+        this.finalExecutor = finalExecutor;
+        transportService.registerRequestHandler(transportNodeAction, nodeExecutor, nodeRequest, new NodeTransportHandler());
+    }
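+
+    // A concrete subclass would typically chain to the constructor above from its own @Inject
+    // constructor; a rough sketch, where all Example* names are illustrative only:
+    //
+    //     super(ExampleNodesAction.NAME, threadPool, clusterService, transportService, actionFilters,
+    //         ExampleNodesRequest::new, ExampleNodeRequest::new, ThreadPool.Names.MANAGEMENT,
+    //         ThreadPool.Names.GENERIC, ExampleNodeResponse.class);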
+ + /** + * Same as {@link #ProtobufTransportNodesAction(String, ThreadPool, ClusterService, ProtobufTransportService, ProtobufActionFilters, ProtobufWriteable.Reader, + * ProtobufWriteable.Reader, String, String, Class)} but executes final response collection on the transport thread except for when the final + * node response is received from the local node, in which case {@code nodeExecutor} is used. + * This constructor should only be used for actions for which the creation of the final response is fast enough to be safely executed + * on a transport thread. + */ + protected ProtobufTransportNodesAction( + String actionName, + ThreadPool threadPool, + ClusterService clusterService, + ProtobufTransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader request, + ProtobufWriteable.Reader nodeRequest, + String nodeExecutor, + Class nodeResponseClass + ) { + this( + actionName, + threadPool, + clusterService, + transportService, + actionFilters, + request, + nodeRequest, + nodeExecutor, + ThreadPool.Names.SAME, + nodeResponseClass + ); + } + + @Override + protected void doExecute(ProtobufTask task, NodesRequest request, ActionListener listener) { + new AsyncAction(task, request, listener).start(); + } + + /** + * Map the responses into {@code nodeResponseClass} responses and {@link ProtobufFailedNodeException}s. + * + * @param request The associated request. + * @param nodesResponses All node-level responses + * @return Never {@code null}. + * @throws NullPointerException if {@code nodesResponses} is {@code null} + * @see #newResponse(ProtobufBaseNodesRequest, List, List) + */ + protected NodesResponse newResponse(NodesRequest request, AtomicReferenceArray nodesResponses) { + final List responses = new ArrayList<>(); + final List failures = new ArrayList<>(); + + for (int i = 0; i < nodesResponses.length(); ++i) { + Object response = nodesResponses.get(i); + + if (response instanceof ProtobufFailedNodeException) { + failures.add((ProtobufFailedNodeException) response); + } else { + responses.add(nodeResponseClass.cast(response)); + } + } + + return newResponse(request, responses, failures); + } + + /** + * Create a new {@link NodesResponse} (multi-node response). + * + * @param request The associated request. + * @param responses All successful node-level responses. + * @param failures All node-level failures. + * @return Never {@code null}. + * @throws NullPointerException if any parameter is {@code null}. 
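+     * <p>
+     * A typical implementation wraps the collected per-node results in the action's response type.
+     * A rough sketch with illustrative Example* names:
+     * <pre>
+     * protected ExampleNodesResponse newResponse(ExampleNodesRequest request,
+     *         List<ExampleNodeResponse> responses, List<ProtobufFailedNodeException> failures) {
+     *     return new ExampleNodesResponse(clusterService.getClusterName(), responses, failures);
+     * }
+     * </pre>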
+ */ + protected abstract NodesResponse newResponse(NodesRequest request, List responses, List failures); + + protected abstract NodeRequest newNodeRequest(NodesRequest request); + + protected abstract NodeResponse newNodeResponse(CodedInputStream in) throws IOException; + + protected abstract NodeResponse nodeOperation(NodeRequest request); + + protected NodeResponse nodeOperation(NodeRequest request, ProtobufTask task) { + return nodeOperation(request); + } + + /** + * resolve node ids to concrete nodes of the incoming request + **/ + protected void resolveRequest(NodesRequest request, ClusterState clusterState) { + assert request.concreteNodes() == null : "request concreteNodes shouldn't be set"; + String[] nodesIds = clusterState.nodes().resolveNodes(request.nodesIds()); + request.setConcreteNodes(Arrays.stream(nodesIds).map(clusterState.nodes()::get).toArray(ProtobufDiscoveryNode[]::new)); + } + + /** + * Get a backwards compatible transport action name + */ + protected String getTransportNodeAction(ProtobufDiscoveryNode node) { + return transportNodeAction; + } + + /** + * Asynchronous action + * + * @opensearch.internal + */ + class AsyncAction { + + private final NodesRequest request; + private final ActionListener listener; + private final AtomicReferenceArray responses; + private final AtomicInteger counter = new AtomicInteger(); + private final ProtobufTask task; + + AsyncAction(ProtobufTask task, NodesRequest request, ActionListener listener) { + this.task = task; + this.request = request; + this.listener = listener; + if (request.concreteNodes() == null) { + resolveRequest(request, clusterService.state()); + assert request.concreteNodes() != null; + } + this.responses = new AtomicReferenceArray<>(request.concreteNodes().length); + } + + void start() { + final ProtobufDiscoveryNode[] nodes = request.concreteNodes(); + if (nodes.length == 0) { + // nothing to notify + threadPool.generic().execute(() -> listener.onResponse(newResponse(request, responses))); + return; + } + TransportRequestOptions.Builder builder = TransportRequestOptions.builder(); + if (request.timeout() != null) { + builder.withTimeout(request.timeout()); + } + for (int i = 0; i < nodes.length; i++) { + final int idx = i; + final ProtobufDiscoveryNode node = nodes[i]; + final String nodeId = node.getId(); + try { + ProtobufTransportRequest nodeRequest = newNodeRequest(request); + if (task != null) { + nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); + } + + transportService.sendRequest( + node, + getTransportNodeAction(node), + nodeRequest, + builder.build(), + new ProtobufTransportResponseHandler() { + @Override + public NodeResponse read(CodedInputStream in) throws IOException { + return newNodeResponse(in); + } + + @Override + public void handleResponse(NodeResponse response) { + onOperation(idx, response); + } + + @Override + public void handleException(ProtobufTransportException exp) { + onFailure(idx, node.getId(), exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + } + ); + } catch (Exception e) { + onFailure(idx, nodeId, e); + } + } + } + + private void onOperation(int idx, NodeResponse nodeResponse) { + responses.set(idx, nodeResponse); + if (counter.incrementAndGet() == responses.length()) { + finishHim(); + } + } + + private void onFailure(int idx, String nodeId, Throwable t) { + if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { + logger.debug(new ParameterizedMessage("failed to execute on node [{}]", 
nodeId), t); + } + responses.set(idx, new ProtobufFailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); + if (counter.incrementAndGet() == responses.length()) { + finishHim(); + } + } + + private void finishHim() { + threadPool.executor(finalExecutor).execute(ActionRunnable.supply(listener, () -> newResponse(request, responses))); + } + } + + /** + * A node transport handler + * + * @opensearch.internal + */ + class NodeTransportHandler implements ProtobufTransportRequestHandler { + + @Override + public void messageReceived(NodeRequest request, ProtobufTransportChannel channel, ProtobufTask task) throws Exception { + channel.sendResponse(nodeOperation(request, task)); + } + } + +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufFilterClient.java b/server/src/main/java/org/opensearch/client/ProtobufFilterClient.java index d5bbd47379070..f4e76450f679a 100644 --- a/server/src/main/java/org/opensearch/client/ProtobufFilterClient.java +++ b/server/src/main/java/org/opensearch/client/ProtobufFilterClient.java @@ -14,7 +14,7 @@ import org.opensearch.action.ProtobufActionResponse; import org.opensearch.client.support.ProtobufAbstractClient; import org.opensearch.common.settings.Settings; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; /** * A {@link ProtobufClient} that contains another {@link ProtobufClient} which it @@ -41,7 +41,7 @@ public ProtobufFilterClient(ProtobufClient in) { * A Constructor that allows to pass settings and threadpool separately. This is useful if the * client is a proxy and not yet fully constructed ie. both dependencies are not available yet. */ - protected ProtobufFilterClient(Settings settings, ProtobufThreadPool threadPool, ProtobufClient in) { + protected ProtobufFilterClient(Settings settings, ThreadPool threadPool, ProtobufClient in) { super(settings, threadPool); this.in = in; } diff --git a/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java b/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java index a521ae469deed..2db9f9c93e940 100644 --- a/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java +++ b/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java @@ -13,7 +13,7 @@ import org.opensearch.action.ProtobufActionType; import org.opensearch.action.ActionFuture; import org.opensearch.action.ActionListener; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; /** * Interface for an OpenSearch client implementation @@ -54,6 +54,6 @@ /** * Returns the threadpool used to execute requests on this client */ - ProtobufThreadPool threadPool(); + ThreadPool threadPool(); } diff --git a/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java b/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java index 0602740a0e304..b5d729b59cb82 100644 --- a/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java +++ b/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java @@ -18,12 +18,12 @@ import org.opensearch.client.ProtobufClient; import org.opensearch.client.support.ProtobufAbstractClient; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.io.stream.ProtobufNamedWriteableRegistry; +import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; import org.opensearch.tasks.ProtobufTask; import org.opensearch.tasks.ProtobufTaskListener; 
-import org.opensearch.threadpool.ProtobufThreadPool; -import org.opensearch.transport.ProtobufRemoteClusterService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterService; import java.util.function.Supplier; @@ -40,18 +40,18 @@ public class ProtobufNodeClient extends ProtobufAbstractClient { * {@link #executeLocally(ProtobufActionType, ProtobufActionRequest, ProtobufTaskListener)}. */ private Supplier localNodeId; - private ProtobufRemoteClusterService remoteClusterService; - private ProtobufNamedWriteableRegistry namedWriteableRegistry; + private RemoteClusterService remoteClusterService; + private NamedWriteableRegistry namedWriteableRegistry; - public ProtobufNodeClient(Settings settings, ProtobufThreadPool threadPool) { + public ProtobufNodeClient(Settings settings, ThreadPool threadPool) { super(settings, threadPool); } public void initialize( ProtobufDynamicActionRegistry actionRegistry, Supplier localNodeId, - ProtobufRemoteClusterService remoteClusterService, - ProtobufNamedWriteableRegistry namedWriteableRegistry + RemoteClusterService remoteClusterService, + NamedWriteableRegistry namedWriteableRegistry ) { this.actionRegistry = actionRegistry; this.localNodeId = localNodeId; @@ -130,10 +130,10 @@ Response extends ProtobufActionResponse> ProtobufTransportAction build(ClusterState currentState) { return accept; }; } + + /** + * builds a predicate that will accept a cluster state only if it was generated after the current has + * (re-)joined the master + */ + public static Predicate buildProtobuf(ProtobufClusterState currentState) { + final long currentVersion = currentState.version(); + final ProtobufDiscoveryNode clusterManagerNode = currentState.nodes().getClusterManagerNode(); + final String currentMasterId = clusterManagerNode == null ? 
null : clusterManagerNode.getEphemeralId(); + return newState -> { + final ProtobufDiscoveryNode newClusterManager = newState.nodes().getClusterManagerNode(); + final boolean accept; + if (newClusterManager == null) { + accept = false; + } else if (newClusterManager.getEphemeralId().equals(currentMasterId) == false) { + accept = true; + } else { + accept = newState.version() > currentVersion; + } + return accept; + }; + } } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index b80fd1d746831..d6c84eab5213b 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.metadata.DataStreamMetadata; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.ProtobufIndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataDeleteIndexService; import org.opensearch.cluster.metadata.MetadataIndexAliasesService; @@ -129,6 +130,7 @@ public class ClusterModule extends AbstractModule { private final ClusterService clusterService; private final IndexNameExpressionResolver indexNameExpressionResolver; + private final ProtobufIndexNameExpressionResolver protobufIndexNameExpressionResolver; private final AllocationDeciders allocationDeciders; private final AllocationService allocationService; private final List clusterPlugins; @@ -150,6 +152,7 @@ public ClusterModule( this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadContext); + this.protobufIndexNameExpressionResolver = new ProtobufIndexNameExpressionResolver(threadContext); this.allocationService = new AllocationService(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService); } @@ -342,6 +345,10 @@ public IndexNameExpressionResolver getIndexNameExpressionResolver() { return indexNameExpressionResolver; } + public ProtobufIndexNameExpressionResolver getProtobufIndexNameExpressionResolver() { + return protobufIndexNameExpressionResolver; + } + // TODO: this is public so allocation benchmark can access the default deciders...can we do that in another way? /** Return a new {@link AllocationDecider} instance with builtin deciders as well as those from plugins. 
*/ public static Collection createAllocationDeciders( @@ -425,6 +432,7 @@ protected void configure() { bind(MetadataUpdateSettingsService.class).asEagerSingleton(); bind(MetadataIndexTemplateService.class).asEagerSingleton(); bind(IndexNameExpressionResolver.class).toInstance(indexNameExpressionResolver); + bind(ProtobufIndexNameExpressionResolver.class).toInstance(protobufIndexNameExpressionResolver); bind(DelayedAllocationService.class).asEagerSingleton(); bind(ShardStateAction.class).asEagerSingleton(); bind(NodeMappingRefreshAction.class).asEagerSingleton(); diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java index 3169688ba9b3e..3d502f396c0c6 100644 --- a/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java +++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateApplier.java @@ -25,5 +25,5 @@ public interface ProtobufClusterStateApplier { * reached all the other appliers, and will likely result in another attempt to apply the same (or very similar) cluster state which * might continue until this node is removed from the cluster. */ - void applyClusterState(ProtobufClusterChangedEvent event); + void applyProtobufClusterState(ProtobufClusterChangedEvent event); } diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateListener.java new file mode 100644 index 0000000000000..b5638d6de0611 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateListener.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster; + +/** + * A listener to be notified when a cluster state changes. + * + * @opensearch.internal + */ +public interface ProtobufClusterStateListener { + + /** + * Called when cluster state changes. + */ + void clusterChanged(ProtobufClusterChangedEvent event); +} diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateObserver.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateObserver.java new file mode 100644 index 0000000000000..828d181b37a09 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterStateObserver.java @@ -0,0 +1,379 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster; + +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.service.ClusterApplierService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.function.Supplier; + +/** + * A utility class which simplifies interacting with the cluster state in cases where + * one tries to take action based on the current state but may want to wait for a new state + * and retry upon failure. 
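+ *
+ * <p>A minimal usage sketch. The call-site names (threadContext, retryOperation, handleFailure) are
+ * illustrative only:
+ * <pre>
+ * ProtobufClusterStateObserver observer =
+ *     new ProtobufClusterStateObserver(clusterService, TimeValue.timeValueSeconds(30), logger, threadContext);
+ * observer.waitForNextChange(new ProtobufClusterStateObserver.Listener() {
+ *     public void onNewClusterState(ProtobufClusterState state) { retryOperation(state); }
+ *     public void onClusterServiceClose() { handleFailure(new NodeClosedException(clusterService.localNode())); }
+ *     public void onTimeout(TimeValue timeout) { handleFailure(new OpenSearchException("timed out waiting for cluster state")); }
+ * }, newState -> newState.nodes().getClusterManagerNode() != null);
+ * </pre>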
+ * + * @opensearch.internal + */ +public class ProtobufClusterStateObserver { + + protected final Logger logger; + + private final Predicate MATCH_ALL_CHANGES_PREDICATE = state -> true; + + private final ClusterApplierService clusterApplierService; + private final ThreadPool threadPool; + private final ThreadContext contextHolder; + volatile TimeValue timeOutValue; + + final AtomicReference lastObservedState; + final ProtobufTimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener(); + // observingContext is not null when waiting on cluster state changes + final AtomicReference observingContext = new AtomicReference<>(null); + volatile Long startTimeMS; + volatile boolean timedOut; + + public ProtobufClusterStateObserver(ClusterService clusterService, Logger logger, ThreadContext contextHolder) { + this(clusterService, new TimeValue(60000), logger, contextHolder); + } + + /** + * @param timeout a global timeout for this observer. After it has expired the observer + * will fail any existing or new #waitForNextChange calls. Set to null + * to wait indefinitely + */ + public ProtobufClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) { + this(clusterService.protobufState(), clusterService, timeout, logger, contextHolder); + } + + /** + * @param timeout a global timeout for this observer. After it has expired the observer + * will fail any existing or new #waitForNextChange calls. Set to null + * to wait indefinitely + */ + public ProtobufClusterStateObserver( + ProtobufClusterState initialState, + ClusterService clusterService, + @Nullable TimeValue timeout, + Logger logger, + ThreadContext contextHolder + ) { + this(initialState, clusterService.getClusterApplierService(), timeout, logger, contextHolder); + } + + public ProtobufClusterStateObserver( + ProtobufClusterState initialState, + ClusterApplierService clusterApplierService, + @Nullable TimeValue timeout, + Logger logger, + ThreadContext contextHolder + ) { + this.clusterApplierService = clusterApplierService; + this.threadPool = clusterApplierService.threadPool(); + this.lastObservedState = new AtomicReference<>(new StoredState(initialState)); + this.timeOutValue = timeout; + if (timeOutValue != null) { + this.startTimeMS = threadPool.relativeTimeInMillis(); + } + this.logger = logger; + this.contextHolder = contextHolder; + } + + /** sets the last observed state to the currently applied cluster state and returns it */ + public ProtobufClusterState setAndGetObservedState() { + if (observingContext.get() != null) { + throw new OpenSearchException("cannot set current cluster state while waiting for a cluster state change"); + } + ProtobufClusterState clusterState = clusterApplierService.protobufState(); + lastObservedState.set(new StoredState(clusterState)); + return clusterState; + } + + /** indicates whether this observer has timed out */ + public boolean isTimedOut() { + return timedOut; + } + + public void waitForNextChange(Listener listener) { + waitForNextChange(listener, MATCH_ALL_CHANGES_PREDICATE); + } + + public void waitForNextChange(Listener listener, @Nullable TimeValue timeOutValue) { + waitForNextChange(listener, MATCH_ALL_CHANGES_PREDICATE, timeOutValue); + } + + public void waitForNextChange(Listener listener, Predicate statePredicate) { + waitForNextChange(listener, statePredicate, null); + } + + /** + * Wait for the next cluster state which satisfies statePredicate + * + * @param listener callback listener + * @param 
statePredicate predicate to check whether cluster state changes are relevant and the callback should be called
+     * @param timeOutValue a timeout for waiting. If null the global observer timeout will be used.
+     */
+    public void waitForNextChange(Listener listener, Predicate<ProtobufClusterState> statePredicate, @Nullable TimeValue timeOutValue) {
+        listener = new ContextPreservingListener(listener, contextHolder.newRestorableContext(false));
+        if (observingContext.get() != null) {
+            throw new OpenSearchException("already waiting for a cluster state change");
+        }
+
+        Long timeoutTimeLeftMS;
+        if (timeOutValue == null) {
+            timeOutValue = this.timeOutValue;
+            if (timeOutValue != null) {
+                long timeSinceStartMS = threadPool.relativeTimeInMillis() - startTimeMS;
+                timeoutTimeLeftMS = timeOutValue.millis() - timeSinceStartMS;
+                if (timeoutTimeLeftMS <= 0L) {
+                    // things have timed out while we were busy -> notify
+                    logger.trace(
+                        "observer timed out. notifying listener. timeout setting [{}], time since start [{}]",
+                        timeOutValue,
+                        new TimeValue(timeSinceStartMS)
+                    );
+                    // update to latest, in case people want to retry
+                    timedOut = true;
+                    lastObservedState.set(new StoredState(clusterApplierService.protobufState()));
+                    listener.onTimeout(timeOutValue);
+                    return;
+                }
+            } else {
+                timeoutTimeLeftMS = null;
+            }
+        } else {
+            this.startTimeMS = threadPool.relativeTimeInMillis();
+            this.timeOutValue = timeOutValue;
+            timeoutTimeLeftMS = timeOutValue.millis();
+            timedOut = false;
+        }
+
+        // sample a new state. This state may be *older* than the supplied state if we are called from an applier,
+        // which wants to wait for something else to happen
+        ProtobufClusterState newState = clusterApplierService.protobufState();
+        if (lastObservedState.get().isOlderOrDifferentClusterManager(newState) && statePredicate.test(newState)) {
+            // good enough, let's go.
+            logger.trace("observer: sampled state accepted by predicate ({})", newState);
+            lastObservedState.set(new StoredState(newState));
+            listener.onNewClusterState(newState);
+        } else {
+            logger.trace("observer: sampled state rejected by predicate ({}). adding listener to ClusterService", newState);
+            final ObservingContext context = new ObservingContext(listener, statePredicate);
+            if (!observingContext.compareAndSet(null, context)) {
+                throw new OpenSearchException("already waiting for a cluster state change");
+            }
+            // clusterApplierService.addTimeoutListener(
+            //     timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS),
+            //     clusterStateListener
+            // );
+        }
+    }
+
+    /**
+     * An observer of the cluster state for changes.
+ * + * @opensearch.internal + */ + class ObserverClusterStateListener implements ProtobufTimeoutClusterStateListener { + + @Override + public void clusterChanged(ProtobufClusterChangedEvent event) { + ObservingContext context = observingContext.get(); + if (context == null) { + // No need to remove listener as it is the responsibility of the thread that set observingContext to null + return; + } + final ProtobufClusterState state = event.state(); + if (context.statePredicate.test(state)) { + if (observingContext.compareAndSet(context, null)) { + // clusterApplierService.removeTimeoutListener(this); + logger.trace("observer: accepting cluster state change ({})", state); + lastObservedState.set(new StoredState(state)); + context.listener.onNewClusterState(state); + } else { + logger.trace( + "observer: predicate approved change but observing context has changed " + + "- ignoring (new cluster state version [{}])", + state.version() + ); + } + } else { + logger.trace("observer: predicate rejected change (new cluster state version [{}])", state.version()); + } + } + + @Override + public void postAdded() { + ObservingContext context = observingContext.get(); + if (context == null) { + // No need to remove listener as it is the responsibility of the thread that set observingContext to null + return; + } + ProtobufClusterState newState = clusterApplierService.protobufState(); + if (lastObservedState.get().isOlderOrDifferentClusterManager(newState) && context.statePredicate.test(newState)) { + // double check we're still listening + if (observingContext.compareAndSet(context, null)) { + logger.trace("observer: post adding listener: accepting current cluster state ({})", newState); + // clusterApplierService.removeTimeoutListener(this); + lastObservedState.set(new StoredState(newState)); + context.listener.onNewClusterState(newState); + } else { + logger.trace( + "observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", + newState + ); + } + } else { + logger.trace("observer: postAdded - predicate rejected state ({})", newState); + } + } + + @Override + public void onClose() { + ObservingContext context = observingContext.getAndSet(null); + + if (context != null) { + logger.trace("observer: cluster service closed. notifying listener."); + // clusterApplierService.removeTimeoutListener(this); + context.listener.onClusterServiceClose(); + } + } + + @Override + public void onTimeout(TimeValue timeout) { + ObservingContext context = observingContext.getAndSet(null); + if (context != null) { + // clusterApplierService.removeTimeoutListener(this); + long timeSinceStartMS = threadPool.relativeTimeInMillis() - startTimeMS; + logger.trace( + "observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", + timeOutValue, + new TimeValue(timeSinceStartMS) + ); + // update to latest, in case people want to retry + lastObservedState.set(new StoredState(clusterApplierService.protobufState())); + timedOut = true; + context.listener.onTimeout(timeOutValue); + } + } + + @Override + public String toString() { + return "ProtobufClusterStateObserver[" + observingContext.get() + "]"; + } + } + + /** + * The observer considers two cluster states to be the same if they have the same version and cluster-manager node id (i.e. 
null or set)
+     *
+     * @opensearch.internal
+     */
+    private static class StoredState {
+        private final String clusterManagerNodeId;
+        private final long version;
+
+        StoredState(ProtobufClusterState clusterState) {
+            this.clusterManagerNodeId = clusterState.nodes().getClusterManagerNodeId();
+            this.version = clusterState.version();
+        }
+
+        /**
+         * returns true if the stored state is older than the given state or they are from a different cluster-manager, meaning they can't be compared
+         */
+        public boolean isOlderOrDifferentClusterManager(ProtobufClusterState clusterState) {
+            return version < clusterState.version()
+                || Objects.equals(clusterManagerNodeId, clusterState.nodes().getClusterManagerNodeId()) == false;
+        }
+    }
+
+    /**
+     * Listener for the observer.
+     *
+     * @opensearch.internal
+     */
+    public interface Listener {
+
+        /** called when a new state is observed */
+        void onNewClusterState(ProtobufClusterState state);
+
+        /** called when the cluster service is closed */
+        void onClusterServiceClose();
+
+        void onTimeout(TimeValue timeout);
+    }
+
+    /**
+     * Context for the observer.
+     *
+     * @opensearch.internal
+     */
+    static class ObservingContext {
+        public final Listener listener;
+        public final Predicate<ProtobufClusterState> statePredicate;
+
+        ObservingContext(Listener listener, Predicate<ProtobufClusterState> statePredicate) {
+            this.listener = listener;
+            this.statePredicate = statePredicate;
+        }
+
+        @Override
+        public String toString() {
+            return "ObservingContext[" + listener + "]";
+        }
+    }
+
+    /**
+     * A context preserving listener.
+     *
+     * @opensearch.internal
+     */
+    private static final class ContextPreservingListener implements Listener {
+        private final Listener delegate;
+        private final Supplier<ThreadContext.StoredContext> contextSupplier;
+
+        private ContextPreservingListener(Listener delegate, Supplier<ThreadContext.StoredContext> contextSupplier) {
+            this.contextSupplier = contextSupplier;
+            this.delegate = delegate;
+        }
+
+        @Override
+        public void onNewClusterState(ProtobufClusterState state) {
+            try (ThreadContext.StoredContext context = contextSupplier.get()) {
+                delegate.onNewClusterState(state);
+            }
+        }
+
+        @Override
+        public void onClusterServiceClose() {
+            try (ThreadContext.StoredContext context = contextSupplier.get()) {
+                delegate.onClusterServiceClose();
+            }
+        }
+
+        @Override
+        public void onTimeout(TimeValue timeout) {
+            try (ThreadContext.StoredContext context = contextSupplier.get()) {
+                delegate.onTimeout(timeout);
+            }
+        }
+
+        @Override
+        public String toString() {
+            return "ContextPreservingListener[" + delegate + "]";
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufTimeoutClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/ProtobufTimeoutClusterStateListener.java
new file mode 100644
index 0000000000000..99ce33a6a71e6
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufTimeoutClusterStateListener.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cluster; + +import org.opensearch.common.unit.TimeValue; + +/** + * An exception to cluster state listener that allows for timeouts and for post added notifications. + * + * @opensearch.internal + */ +public interface ProtobufTimeoutClusterStateListener extends ProtobufClusterStateListener { + + void postAdded(); + + void onClose(); + + void onTimeout(TimeValue timeout); +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java new file mode 100644 index 0000000000000..dbb1bc319575d --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java @@ -0,0 +1,1342 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + + +package org.opensearch.cluster.metadata; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.cluster.ProtobufClusterState; +import org.opensearch.common.Booleans; +import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; +import org.opensearch.common.collect.ImmutableOpenMap; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.regex.Regex; +import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.time.DateMathParser; +import org.opensearch.common.time.DateUtils; +import org.opensearch.common.util.CollectionUtils; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.set.Sets; +import org.opensearch.index.Index; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.indices.IndexClosedException; +import org.opensearch.indices.InvalidIndexNameException; + +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.SortedMap; +import java.util.Spliterators; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +/** + * Resolves index name from an expression + * + * @opensearch.internal + */ +public class ProtobufIndexNameExpressionResolver { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ProtobufIndexNameExpressionResolver.class); + + public static final String EXCLUDED_DATA_STREAMS_KEY = "opensearch.excluded_ds"; + public static final 
String SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY = "_system_index_access_allowed"; + + private final DateMathExpressionResolver dateMathExpressionResolver = new DateMathExpressionResolver(); + private final WildcardExpressionResolver wildcardExpressionResolver = new WildcardExpressionResolver(); + private final List expressionResolvers = List.of(dateMathExpressionResolver, wildcardExpressionResolver); + + private final ThreadContext threadContext; + + public ProtobufIndexNameExpressionResolver(ThreadContext threadContext) { + this.threadContext = Objects.requireNonNull(threadContext, "Thread Context must not be null"); + } + + /** + * Same as {@link #concreteIndexNames(ProtobufClusterState, IndicesOptions, String...)}, but the index expressions and options + * are encapsulated in the specified request. + */ + public String[] concreteIndexNames(ProtobufClusterState state, IndicesRequest request) { + Context context = new Context( + state, + request.indicesOptions(), + false, + false, + request.includeDataStreams(), + isSystemIndexAccessAllowed() + ); + return concreteIndexNames(context, request.indices()); + } + + /** + * Same as {@link #concreteIndexNames(ProtobufClusterState, IndicesRequest)}, but access to system indices is always allowed. + */ + public String[] concreteIndexNamesWithSystemIndexAccess(ProtobufClusterState state, IndicesRequest request) { + Context context = new Context(state, request.indicesOptions(), false, false, request.includeDataStreams(), true); + return concreteIndexNames(context, request.indices()); + } + + /** + * Same as {@link #concreteIndices(ProtobufClusterState, IndicesOptions, String...)}, but the index expressions and options + * are encapsulated in the specified request and resolves data streams. + */ + public Index[] concreteIndices(ProtobufClusterState state, IndicesRequest request) { + Context context = new Context( + state, + request.indicesOptions(), + false, + false, + request.includeDataStreams(), + isSystemIndexAccessAllowed() + ); + return concreteIndices(context, request.indices()); + } + + /** + * Translates the provided index expression into actual concrete indices, properly deduplicated. + * + * @param state the cluster state containing all the data to resolve to expressions to concrete indices + * @param options defines how the aliases or indices need to be resolved to concrete indices + * @param indexExpressions expressions that can be resolved to alias or index names. + * @return the resolved concrete indices based on the cluster state, indices options and index expressions + * @throws IndexNotFoundException if one of the index expressions is pointing to a missing index or alias and the + * provided indices options in the context don't allow such a case, or if the final result of the indices resolution + * contains no indices and the indices options in the context don't allow such a case. + * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * indices options in the context don't allow such a case. + */ + public String[] concreteIndexNames(ProtobufClusterState state, IndicesOptions options, String... indexExpressions) { + Context context = new Context(state, options, isSystemIndexAccessAllowed()); + return concreteIndexNames(context, indexExpressions); + } + + public String[] concreteIndexNames(ProtobufClusterState state, IndicesOptions options, boolean includeDataStreams, String... 
indexExpressions) { + Context context = new Context(state, options, false, false, includeDataStreams, isSystemIndexAccessAllowed()); + return concreteIndexNames(context, indexExpressions); + } + + public String[] concreteIndexNames(ProtobufClusterState state, IndicesOptions options, IndicesRequest request) { + Context context = new Context(state, options, false, false, request.includeDataStreams(), isSystemIndexAccessAllowed()); + return concreteIndexNames(context, request.indices()); + } + + public List dataStreamNames(ProtobufClusterState state, IndicesOptions options, String... indexExpressions) { + // Allow system index access - they'll be filtered out below as there's no such thing (yet) as system data streams + Context context = new Context(state, options, false, false, true, true, true); + if (indexExpressions == null || indexExpressions.length == 0) { + indexExpressions = new String[] { "*" }; + } + + List dataStreams = wildcardExpressionResolver.resolve(context, Arrays.asList(indexExpressions)); + return ((dataStreams == null) ? List.of() : dataStreams).stream() + .map(x -> state.metadata().getIndicesLookup().get(x)) + .filter(Objects::nonNull) + .filter(ia -> ia.getType() == IndexAbstraction.Type.DATA_STREAM) + .map(IndexAbstraction::getName) + .collect(Collectors.toList()); + } + + /** + * Translates the provided index expression into actual concrete indices, properly deduplicated. + * + * @param state the cluster state containing all the data to resolve to expressions to concrete indices + * @param options defines how the aliases or indices need to be resolved to concrete indices + * @param indexExpressions expressions that can be resolved to alias or index names. + * @return the resolved concrete indices based on the cluster state, indices options and index expressions + * @throws IndexNotFoundException if one of the index expressions is pointing to a missing index or alias and the + * provided indices options in the context don't allow such a case, or if the final result of the indices resolution + * contains no indices and the indices options in the context don't allow such a case. + * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * indices options in the context don't allow such a case. + */ + public Index[] concreteIndices(ProtobufClusterState state, IndicesOptions options, String... indexExpressions) { + return concreteIndices(state, options, false, indexExpressions); + } + + public Index[] concreteIndices(ProtobufClusterState state, IndicesOptions options, boolean includeDataStreams, String... indexExpressions) { + Context context = new Context(state, options, false, false, includeDataStreams, isSystemIndexAccessAllowed()); + return concreteIndices(context, indexExpressions); + } + + /** + * Translates the provided index expression into actual concrete indices, properly deduplicated. + * + * @param state the cluster state containing all the data to resolve to expressions to concrete indices + * @param startTime The start of the request where concrete indices is being invoked for + * @param request request containing expressions that can be resolved to alias, index, or data stream names. + * @return the resolved concrete indices based on the cluster state, indices options and index expressions + * provided indices options in the context don't allow such a case, or if the final result of the indices resolution + * contains no indices and the indices options in the context don't allow such a case. 
+ * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * indices options in the context don't allow such a case. + */ + public Index[] concreteIndices(ProtobufClusterState state, IndicesRequest request, long startTime) { + Context context = new Context( + state, + request.indicesOptions(), + startTime, + false, + false, + request.includeDataStreams(), + false, + isSystemIndexAccessAllowed() + ); + return concreteIndices(context, request.indices()); + } + + String[] concreteIndexNames(Context context, String... indexExpressions) { + Index[] indexes = concreteIndices(context, indexExpressions); + String[] names = new String[indexes.length]; + for (int i = 0; i < indexes.length; i++) { + names[i] = indexes[i].getName(); + } + return names; + } + + Index[] concreteIndices(Context context, String... indexExpressions) { + if (indexExpressions == null || indexExpressions.length == 0) { + indexExpressions = new String[] { Metadata.ALL }; + } + Metadata metadata = context.getState().metadata(); + IndicesOptions options = context.getOptions(); + // If only one index is specified then whether we fail a request if an index is missing depends on the allow_no_indices + // option. At some point we should change this, because there shouldn't be a reason why whether a single index + // or multiple indices are specified yield different behaviour. + final boolean failNoIndices = indexExpressions.length == 1 ? !options.allowNoIndices() : !options.ignoreUnavailable(); + List expressions = Arrays.asList(indexExpressions); + for (ExpressionResolver expressionResolver : expressionResolvers) { + expressions = expressionResolver.resolve(context, expressions); + } + + if (expressions.isEmpty()) { + if (!options.allowNoIndices()) { + IndexNotFoundException infe; + if (indexExpressions.length == 1) { + if (indexExpressions[0].equals(Metadata.ALL)) { + infe = new IndexNotFoundException("no indices exist", (String) null); + } else { + infe = new IndexNotFoundException((String) null); + } + } else { + infe = new IndexNotFoundException((String) null); + } + infe.setResources("index_expression", indexExpressions); + throw infe; + } else { + return Index.EMPTY_ARRAY; + } + } + + boolean excludedDataStreams = false; + final Set concreteIndices = new HashSet<>(expressions.size()); + for (String expression : expressions) { + IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(expression); + if (indexAbstraction == null) { + if (failNoIndices) { + IndexNotFoundException infe; + if (expression.equals(Metadata.ALL)) { + infe = new IndexNotFoundException("no indices exist", expression); + } else { + infe = new IndexNotFoundException(expression); + } + infe.setResources("index_expression", expression); + throw infe; + } else { + continue; + } + } else if (indexAbstraction.getType() == IndexAbstraction.Type.ALIAS && context.getOptions().ignoreAliases()) { + if (failNoIndices) { + throw aliasesNotSupportedException(expression); + } else { + continue; + } + } else if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM && context.includeDataStreams() == false) { + excludedDataStreams = true; + continue; + } + + if (indexAbstraction.getType() == IndexAbstraction.Type.ALIAS && context.isResolveToWriteIndex()) { + IndexMetadata writeIndex = indexAbstraction.getWriteIndex(); + if (writeIndex == null) { + throw new IllegalArgumentException( + "no write index is defined for alias [" + + indexAbstraction.getName() + + "]." 
+ + " The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" + + " indices without one being designated as a write index" + ); + } + if (addIndex(writeIndex, context)) { + concreteIndices.add(writeIndex.getIndex()); + } + } else if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM && context.isResolveToWriteIndex()) { + IndexMetadata writeIndex = indexAbstraction.getWriteIndex(); + if (addIndex(writeIndex, context)) { + concreteIndices.add(writeIndex.getIndex()); + } + } else { + if (indexAbstraction.getIndices().size() > 1 && !options.allowAliasesToMultipleIndices()) { + String[] indexNames = new String[indexAbstraction.getIndices().size()]; + int i = 0; + for (IndexMetadata indexMetadata : indexAbstraction.getIndices()) { + indexNames[i++] = indexMetadata.getIndex().getName(); + } + throw new IllegalArgumentException( + indexAbstraction.getType().getDisplayName() + + " [" + + expression + + "] has more than one index associated with it " + + Arrays.toString(indexNames) + + ", can't execute a single index op" + ); + } + + for (IndexMetadata index : indexAbstraction.getIndices()) { + if (shouldTrackConcreteIndex(context, options, index)) { + concreteIndices.add(index.getIndex()); + } + } + } + } + + if (options.allowNoIndices() == false && concreteIndices.isEmpty()) { + IndexNotFoundException infe = new IndexNotFoundException((String) null); + infe.setResources("index_expression", indexExpressions); + if (excludedDataStreams) { + // Allows callers to handle IndexNotFoundException differently based on whether data streams were excluded. + infe.addMetadata(EXCLUDED_DATA_STREAMS_KEY, "true"); + } + throw infe; + } + checkSystemIndexAccess(context, metadata, concreteIndices, indexExpressions); + return concreteIndices.toArray(new Index[0]); + } + + private void checkSystemIndexAccess(Context context, Metadata metadata, Set concreteIndices, String[] originalPatterns) { + if (context.isSystemIndexAccessAllowed() == false) { + final List resolvedSystemIndices = concreteIndices.stream() + .map(metadata::index) + .filter(IndexMetadata::isSystem) + .map(i -> i.getIndex().getName()) + .sorted() // reliable order for testing + .collect(Collectors.toList()); + if (resolvedSystemIndices.isEmpty() == false) { + resolvedSystemIndices.forEach( + systemIndexName -> deprecationLogger.deprecate( + "open_system_index_access_" + systemIndexName, + "this request accesses system indices: [{}], but in a future major version, direct access to system " + + "indices will be prevented by default", + systemIndexName + ) + ); + } + } + } + + private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions options, IndexMetadata index) { + if (index.getState() == IndexMetadata.State.CLOSE) { + if (options.forbidClosedIndices() && options.ignoreUnavailable() == false) { + throw new IndexClosedException(index.getIndex()); + } else { + return options.forbidClosedIndices() == false && addIndex(index, context); + } + } else if (index.getState() == IndexMetadata.State.OPEN) { + return addIndex(index, context); + } else { + throw new IllegalStateException("index state [" + index.getState() + "] not supported"); + } + } + + private static boolean addIndex(IndexMetadata metadata, Context context) { + // This used to check the `index.search.throttled` setting, but we eventually decided that it was + // trappy to hide throttled indices by default. 
In order to avoid breaking backward compatibility, + // we changed it to look at the `index.frozen` setting instead, since frozen indices were the only + // type of index to use the `search_throttled` threadpool at that time. + // NOTE: The Setting object was defined in an external plugin prior to OpenSearch fork. + return (context.options.ignoreThrottled() && metadata.getSettings().getAsBoolean("index.frozen", false)) == false; + } + + private static IllegalArgumentException aliasesNotSupportedException(String expression) { + return new IllegalArgumentException( + "The provided expression [" + expression + "] matches an " + "alias, specify the corresponding concrete indices instead." + ); + } + + /** + * Utility method that allows to resolve an index expression to its corresponding single concrete index. + * Callers should make sure they provide proper {@link org.opensearch.action.support.IndicesOptions} + * that require a single index as a result. The indices resolution must in fact return a single index when + * using this method, an {@link IllegalArgumentException} gets thrown otherwise. + * + * @param state the cluster state containing all the data to resolve to expression to a concrete index + * @param request The request that defines how the an alias or an index need to be resolved to a concrete index + * and the expression that can be resolved to an alias or an index name. + * @throws IllegalArgumentException if the index resolution lead to more than one index + * @return the concrete index obtained as a result of the index resolution + */ + public Index concreteSingleIndex(ProtobufClusterState state, IndicesRequest request) { + String indexExpression = CollectionUtils.isEmpty(request.indices()) ? null : request.indices()[0]; + Index[] indices = concreteIndices(state, request.indicesOptions(), indexExpression); + if (indices.length != 1) { + throw new IllegalArgumentException( + "unable to return a single index as the index and options" + " provided got resolved to multiple indices" + ); + } + return indices[0]; + } + + /** + * Utility method that allows to resolve an index expression to its corresponding single write index. + * + * @param state the cluster state containing all the data to resolve to expression to a concrete index + * @param request The request that defines how the an alias or an index need to be resolved to a concrete index + * and the expression that can be resolved to an alias or an index name. + * @throws IllegalArgumentException if the index resolution does not lead to an index, or leads to more than one index + * @return the write index obtained as a result of the index resolution + */ + public Index concreteWriteIndex(ProtobufClusterState state, IndicesRequest request) { + if (request.indices() == null || (request.indices() != null && request.indices().length != 1)) { + throw new IllegalArgumentException("indices request must specify a single index expression"); + } + return concreteWriteIndex(state, request.indicesOptions(), request.indices()[0], false, request.includeDataStreams()); + } + + /** + * Utility method that allows to resolve an index expression to its corresponding single write index. + * + * @param state the cluster state containing all the data to resolve to expression to a concrete index + * @param options defines how the aliases or indices need to be resolved to concrete indices + * @param index index that can be resolved to alias or index name. 
+     * @param allowNoIndices whether to allow resolving to no index
+     * @param includeDataStreams Whether data streams should be included in the evaluation.
+     * @throws IllegalArgumentException if the index resolution does not lead to an index, or leads to more than one index
+     * @return the write index obtained as a result of the index resolution, or null if no index was resolved
+     */
+    public Index concreteWriteIndex(
+        ProtobufClusterState state,
+        IndicesOptions options,
+        String index,
+        boolean allowNoIndices,
+        boolean includeDataStreams
+    ) {
+        IndicesOptions combinedOptions = IndicesOptions.fromOptions(
+            options.ignoreUnavailable(),
+            allowNoIndices,
+            options.expandWildcardsOpen(),
+            options.expandWildcardsClosed(),
+            options.expandWildcardsHidden(),
+            options.allowAliasesToMultipleIndices(),
+            options.forbidClosedIndices(),
+            options.ignoreAliases(),
+            options.ignoreThrottled()
+        );
+
+        Context context = new Context(state, combinedOptions, false, true, includeDataStreams, isSystemIndexAccessAllowed());
+        Index[] indices = concreteIndices(context, index);
+        if (allowNoIndices && indices.length == 0) {
+            return null;
+        }
+        if (indices.length != 1) {
+            throw new IllegalArgumentException(
+                "The index expression [" + index + "] and options provided did not point to a single write-index"
+            );
+        }
+        return indices[0];
+    }
+
+    /**
+     * @return whether the specified index, data stream or alias exists.
+     *         If the data stream, index or alias contains date math then that is resolved too.
+     */
+    public boolean hasIndexAbstraction(String indexAbstraction, ProtobufClusterState state) {
+        Context context = new Context(state, IndicesOptions.lenientExpandOpen(), false, false, true, isSystemIndexAccessAllowed());
+        String resolvedAliasOrIndex = dateMathExpressionResolver.resolveExpression(indexAbstraction, context);
+        return state.metadata().getIndicesLookup().containsKey(resolvedAliasOrIndex);
+    }
+
+    /**
+     * @return If the specified string is a date math expression then this method returns the resolved expression.
+     */
+    public String resolveDateMathExpression(String dateExpression) {
+        // The date math expression resolver doesn't rely on cluster state or indices options, because
+        // it just resolves the date math to an actual date.
+        return dateMathExpressionResolver.resolveExpression(dateExpression, new Context(null, null, isSystemIndexAccessAllowed()));
+    }
+
+    /**
+     * Resolve an array of expressions to the set of indices and aliases that these expressions match.
+     */
+    public Set<String> resolveExpressions(ProtobufClusterState state, String... expressions) {
+        Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true, false, true, isSystemIndexAccessAllowed());
+        List<String> resolvedExpressions = Arrays.asList(expressions);
+        for (ExpressionResolver expressionResolver : expressionResolvers) {
+            resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions);
+        }
+        return Collections.unmodifiableSet(new HashSet<>(resolvedExpressions));
+    }
+
+    /**
+     * Iterates through the list of indices and selects the effective list of filtering aliases for the
+     * given index.
+     * <p>Only aliases with filters are returned. If the indices list contains a non-filtering reference to
+     * the index itself - null is returned. Returns {@code null} if no filtering is required.
+     * NOTE: The provided expressions must have been resolved already via {@link #resolveExpressions}.
+     */
+    public String[] filteringAliases(ProtobufClusterState state, String index, Set<String> resolvedExpressions) {
+        return indexAliases(state, index, AliasMetadata::filteringRequired, false, resolvedExpressions);
+    }
+
+    /**
+     * Whether to generate the candidate set from index aliases, or from the set of resolved expressions.
+     * @param indexAliasesSize the number of aliases of the index
+     * @param resolvedExpressionsSize the number of resolved expressions
+     */
+    // pkg-private for testing
+    boolean iterateIndexAliases(int indexAliasesSize, int resolvedExpressionsSize) {
+        return indexAliasesSize <= resolvedExpressionsSize;
+    }
+
+    /**
+     * Iterates through the list of indices and selects the effective list of required aliases for the given index.
+     * <p>Only aliases where the given predicate tests successfully are returned. If the indices list contains a non-required reference to
+     * the index itself - null is returned. Returns {@code null} if no filtering is required.
+     *

NOTE: the provided expressions must have been resolved already via {@link #resolveExpressions}. + */ + public String[] indexAliases( + ProtobufClusterState state, + String index, + Predicate requiredAlias, + boolean skipIdentity, + Set resolvedExpressions + ) { + if (isAllIndices(resolvedExpressions)) { + return null; + } + + final IndexMetadata indexMetadata = state.metadata().getIndices().get(index); + if (indexMetadata == null) { + // Shouldn't happen + throw new IndexNotFoundException(index); + } + + if (skipIdentity == false && resolvedExpressions.contains(index)) { + return null; + } + + final ImmutableOpenMap indexAliases = indexMetadata.getAliases(); + final AliasMetadata[] aliasCandidates; + if (iterateIndexAliases(indexAliases.size(), resolvedExpressions.size())) { + // faster to iterate indexAliases + aliasCandidates = StreamSupport.stream(Spliterators.spliteratorUnknownSize(indexAliases.values().iterator(), 0), false) + .map(cursor -> cursor.value) + .filter(aliasMetadata -> resolvedExpressions.contains(aliasMetadata.alias())) + .toArray(AliasMetadata[]::new); + } else { + // faster to iterate resolvedExpressions + aliasCandidates = resolvedExpressions.stream().map(indexAliases::get).filter(Objects::nonNull).toArray(AliasMetadata[]::new); + } + + List aliases = null; + for (AliasMetadata aliasMetadata : aliasCandidates) { + if (requiredAlias.test(aliasMetadata)) { + // If required - add it to the list of aliases + if (aliases == null) { + aliases = new ArrayList<>(); + } + aliases.add(aliasMetadata.alias()); + } else { + // If not, we have a non required alias for this index - no further checking needed + return null; + } + } + if (aliases == null) { + return null; + } + return aliases.toArray(new String[0]); + } + + /** + * Resolves the search routing if in the expression aliases are used. If expressions point to concrete indices + * or aliases with no routing defined the specified routing is used. + * + * @return routing values grouped by concrete index + */ + public Map> resolveSearchRouting(ProtobufClusterState state, @Nullable String routing, String... expressions) { + List resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.emptyList(); + Context context = new Context(state, IndicesOptions.lenientExpandOpen(), false, false, true, isSystemIndexAccessAllowed()); + for (ExpressionResolver expressionResolver : expressionResolvers) { + resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); + } + + // TODO: it appears that this can never be true? 
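+        // (Illustration: the resolvers above expand null/empty/"_all" inputs into concrete index
+        // names, so the branch below appears reachable only when the cluster holds no indices at
+        // all and the resolved list comes back empty.)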
+ if (isAllIndices(resolvedExpressions)) { + return resolveSearchRoutingAllIndices(state.metadata(), routing); + } + + Map> routings = null; + Set paramRouting = null; + // List of indices that don't require any routing + Set norouting = new HashSet<>(); + if (routing != null) { + paramRouting = Sets.newHashSet(Strings.splitStringByCommaToArray(routing)); + } + + for (String expression : resolvedExpressions) { + IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(expression); + if (indexAbstraction != null && indexAbstraction.getType() == IndexAbstraction.Type.ALIAS) { + IndexAbstraction.Alias alias = (IndexAbstraction.Alias) indexAbstraction; + for (Tuple item : alias.getConcreteIndexAndAliasMetadatas()) { + String concreteIndex = item.v1(); + AliasMetadata aliasMetadata = item.v2(); + if (!norouting.contains(concreteIndex)) { + if (!aliasMetadata.searchRoutingValues().isEmpty()) { + // Routing alias + if (routings == null) { + routings = new HashMap<>(); + } + Set r = routings.get(concreteIndex); + if (r == null) { + r = new HashSet<>(); + routings.put(concreteIndex, r); + } + r.addAll(aliasMetadata.searchRoutingValues()); + if (paramRouting != null) { + r.retainAll(paramRouting); + } + if (r.isEmpty()) { + routings.remove(concreteIndex); + } + } else { + // Non-routing alias + if (!norouting.contains(concreteIndex)) { + norouting.add(concreteIndex); + if (paramRouting != null) { + Set r = new HashSet<>(paramRouting); + if (routings == null) { + routings = new HashMap<>(); + } + routings.put(concreteIndex, r); + } else { + if (routings != null) { + routings.remove(concreteIndex); + } + } + } + } + } + } + } else { + // Index + if (!norouting.contains(expression)) { + norouting.add(expression); + if (paramRouting != null) { + Set r = new HashSet<>(paramRouting); + if (routings == null) { + routings = new HashMap<>(); + } + routings.put(expression, r); + } else { + if (routings != null) { + routings.remove(expression); + } + } + } + } + + } + if (routings == null || routings.isEmpty()) { + return null; + } + return routings; + } + + /** + * Sets the same routing for all indices + */ + public Map> resolveSearchRoutingAllIndices(Metadata metadata, String routing) { + if (routing != null) { + Set r = Sets.newHashSet(Strings.splitStringByCommaToArray(routing)); + Map> routings = new HashMap<>(); + String[] concreteIndices = metadata.getConcreteAllIndices(); + for (String index : concreteIndices) { + routings.put(index, r); + } + return routings; + } + return null; + } + + /** + * Identifies whether the array containing index names given as argument refers to all indices + * The empty or null array identifies all indices + * + * @param aliasesOrIndices the array containing index names + * @return true if the provided array maps to all indices, false otherwise + */ + public static boolean isAllIndices(Collection aliasesOrIndices) { + return aliasesOrIndices == null || aliasesOrIndices.isEmpty() || isExplicitAllPattern(aliasesOrIndices); + } + + /** + * Identifies whether the array containing index names given as argument explicitly refers to all indices + * The empty or null array doesn't explicitly map to all indices + * + * @param aliasesOrIndices the array containing index names + * @return true if the provided array explicitly maps to all indices, false otherwise + */ + static boolean isExplicitAllPattern(Collection aliasesOrIndices) { + return aliasesOrIndices != null && aliasesOrIndices.size() == 1 && Metadata.ALL.equals(aliasesOrIndices.iterator().next()); + } + + 
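+    // A quick sketch of how the two checks above behave (Metadata.ALL is the literal "_all";
+    // the example inputs are hypothetical):
+    //   isAllIndices(null)                            -> true  (implicit all)
+    //   isAllIndices(Collections.emptyList())         -> true  (implicit all)
+    //   isAllIndices(List.of("_all"))                 -> true  (explicit all)
+    //   isAllIndices(List.of("logs-*"))               -> false
+    //   isExplicitAllPattern(List.of("_all", "logs")) -> false (only a lone "_all" counts)
+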
/** + * Identifies whether the first argument (an array containing index names) is a pattern that matches all indices + * + * @param indicesOrAliases the array containing index names + * @param concreteIndices array containing the concrete indices that the first argument refers to + * @return true if the first argument is a pattern that maps to all available indices, false otherwise + */ + boolean isPatternMatchingAllIndices(Metadata metadata, String[] indicesOrAliases, String[] concreteIndices) { + // if we end up matching on all indices, check, if its a wildcard parameter, or a "-something" structure + if (concreteIndices.length == metadata.getConcreteAllIndices().length && indicesOrAliases.length > 0) { + + // we might have something like /-test1,+test1 that would identify all indices + // or something like /-test1 with test1 index missing and IndicesOptions.lenient() + if (indicesOrAliases[0].charAt(0) == '-') { + return true; + } + + // otherwise we check if there's any simple regex + for (String indexOrAlias : indicesOrAliases) { + if (Regex.isSimpleMatchPattern(indexOrAlias)) { + return true; + } + } + } + return false; + } + + /** + * Determines whether or not system index access should be allowed in the current context. + * + * @return True if system index access should be allowed, false otherwise. + */ + public boolean isSystemIndexAccessAllowed() { + return Booleans.parseBoolean(threadContext.getHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY), true); + } + + /** + * Context for the resolver. + * + * @opensearch.internal + */ + public static class Context { + + private final ProtobufClusterState state; + private final IndicesOptions options; + private final long startTime; + private final boolean preserveAliases; + private final boolean resolveToWriteIndex; + private final boolean includeDataStreams; + private final boolean preserveDataStreams; + private final boolean isSystemIndexAccessAllowed; + + Context(ProtobufClusterState state, IndicesOptions options, boolean isSystemIndexAccessAllowed) { + this(state, options, System.currentTimeMillis(), isSystemIndexAccessAllowed); + } + + Context( + ProtobufClusterState state, + IndicesOptions options, + boolean preserveAliases, + boolean resolveToWriteIndex, + boolean includeDataStreams, + boolean isSystemIndexAccessAllowed + ) { + this( + state, + options, + System.currentTimeMillis(), + preserveAliases, + resolveToWriteIndex, + includeDataStreams, + false, + isSystemIndexAccessAllowed + ); + } + + Context( + ProtobufClusterState state, + IndicesOptions options, + boolean preserveAliases, + boolean resolveToWriteIndex, + boolean includeDataStreams, + boolean preserveDataStreams, + boolean isSystemIndexAccessAllowed + ) { + this( + state, + options, + System.currentTimeMillis(), + preserveAliases, + resolveToWriteIndex, + includeDataStreams, + preserveDataStreams, + isSystemIndexAccessAllowed + ); + } + + Context(ProtobufClusterState state, IndicesOptions options, long startTime, boolean isSystemIndexAccessAllowed) { + this(state, options, startTime, false, false, false, false, isSystemIndexAccessAllowed); + } + + protected Context( + ProtobufClusterState state, + IndicesOptions options, + long startTime, + boolean preserveAliases, + boolean resolveToWriteIndex, + boolean includeDataStreams, + boolean preserveDataStreams, + boolean isSystemIndexAccessAllowed + ) { + this.state = state; + this.options = options; + this.startTime = startTime; + this.preserveAliases = preserveAliases; + this.resolveToWriteIndex = resolveToWriteIndex; + 
this.includeDataStreams = includeDataStreams; + this.preserveDataStreams = preserveDataStreams; + this.isSystemIndexAccessAllowed = isSystemIndexAccessAllowed; + } + + public ProtobufClusterState getState() { + return state; + } + + public IndicesOptions getOptions() { + return options; + } + + public long getStartTime() { + return startTime; + } + + /** + * This is used to prevent resolving aliases to concrete indices but this also means + * that we might return aliases that point to a closed index. This is currently only used + * by {@link #filteringAliases(ProtobufClusterState, String, Set)} since it's the only one that needs aliases + */ + boolean isPreserveAliases() { + return preserveAliases; + } + + /** + * This is used to require that aliases resolve to their write-index. It is currently not used in conjunction + * with preserveAliases. + */ + boolean isResolveToWriteIndex() { + return resolveToWriteIndex; + } + + public boolean includeDataStreams() { + return includeDataStreams; + } + + public boolean isPreserveDataStreams() { + return preserveDataStreams; + } + + /** + * Used to determine if it is allowed to access system indices in this context (e.g. for this request). + */ + public boolean isSystemIndexAccessAllowed() { + return isSystemIndexAccessAllowed; + } + } + + private interface ExpressionResolver { + + /** + * Resolves the list of expressions into other expressions if possible (possible concrete indices and aliases, but + * that isn't required). The provided implementations can also be left untouched. + * + * @return a new list with expressions based on the provided expressions + */ + List resolve(Context context, List expressions); + + } + + /** + * Resolves alias/index name expressions with wildcards into the corresponding concrete indices/aliases + * + * @opensearch.internal + */ + static final class WildcardExpressionResolver implements ExpressionResolver { + + @Override + public List resolve(Context context, List expressions) { + IndicesOptions options = context.getOptions(); + Metadata metadata = context.getState().metadata(); + // only check open/closed since if we do not expand to open or closed it doesn't make sense to + // expand to hidden + if (options.expandWildcardsClosed() == false && options.expandWildcardsOpen() == false) { + return expressions; + } + + if (isEmptyOrTrivialWildcard(expressions)) { + List resolvedExpressions = resolveEmptyOrTrivialWildcard(options, metadata); + if (context.includeDataStreams()) { + final IndexMetadata.State excludeState = excludeState(options); + final Map dataStreamsAbstractions = metadata.getIndicesLookup() + .entrySet() + .stream() + .filter(entry -> entry.getValue().getType() == IndexAbstraction.Type.DATA_STREAM) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + // dedup backing indices if expand hidden indices option is true + Set resolvedIncludingDataStreams = new HashSet<>(resolvedExpressions); + resolvedIncludingDataStreams.addAll( + expand( + context, + excludeState, + dataStreamsAbstractions, + expressions.isEmpty() ? 
"_all" : expressions.get(0), + options.expandWildcardsHidden() + ) + ); + return new ArrayList<>(resolvedIncludingDataStreams); + } + return resolvedExpressions; + } + + Set result = innerResolve(context, expressions, options, metadata); + + if (result == null) { + return expressions; + } + if (result.isEmpty() && !options.allowNoIndices()) { + IndexNotFoundException infe = new IndexNotFoundException((String) null); + infe.setResources("index_or_alias", expressions.toArray(new String[0])); + throw infe; + } + return new ArrayList<>(result); + } + + private Set innerResolve(Context context, List expressions, IndicesOptions options, Metadata metadata) { + Set result = null; + boolean wildcardSeen = false; + for (int i = 0; i < expressions.size(); i++) { + String expression = expressions.get(i); + if (Strings.isEmpty(expression)) { + throw indexNotFoundException(expression); + } + validateAliasOrIndex(expression); + if (aliasOrIndexExists(context, options, metadata, expression)) { + if (result != null) { + result.add(expression); + } + continue; + } + final boolean add; + if (expression.charAt(0) == '-' && wildcardSeen) { + add = false; + expression = expression.substring(1); + } else { + add = true; + } + if (result == null) { + // add all the previous ones... + result = new HashSet<>(expressions.subList(0, i)); + } + if (Regex.isSimpleMatchPattern(expression) == false) { + // TODO why does wildcard resolver throw exceptions regarding non wildcarded expressions? This should not be done here. + if (options.ignoreUnavailable() == false) { + IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(expression); + if (indexAbstraction == null) { + throw indexNotFoundException(expression); + } else if (indexAbstraction.getType() == IndexAbstraction.Type.ALIAS && options.ignoreAliases()) { + throw aliasesNotSupportedException(expression); + } else if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM + && context.includeDataStreams() == false) { + throw indexNotFoundException(expression); + } + } + if (add) { + result.add(expression); + } else { + result.remove(expression); + } + continue; + } + + final IndexMetadata.State excludeState = excludeState(options); + final Map matches = matches(context, metadata, expression); + Set expand = expand(context, excludeState, matches, expression, options.expandWildcardsHidden()); + if (add) { + result.addAll(expand); + } else { + result.removeAll(expand); + } + if (options.allowNoIndices() == false && matches.isEmpty()) { + throw indexNotFoundException(expression); + } + if (Regex.isSimpleMatchPattern(expression)) { + wildcardSeen = true; + } + } + return result; + } + + private static void validateAliasOrIndex(String expression) { + // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API + // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, + // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown + // if the expression can't be found. 
+ if (expression.charAt(0) == '_') { + throw new InvalidIndexNameException(expression, "must not start with '_'."); + } + } + + private static boolean aliasOrIndexExists(Context context, IndicesOptions options, Metadata metadata, String expression) { + IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(expression); + if (indexAbstraction == null) { + return false; + } + + // treat aliases as unavailable indices when ignoreAliases is set to true (e.g. delete index and update aliases api) + if (indexAbstraction.getType() == IndexAbstraction.Type.ALIAS && options.ignoreAliases()) { + return false; + } + + if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM && context.includeDataStreams() == false) { + return false; + } + + return true; + } + + private static IndexNotFoundException indexNotFoundException(String expression) { + IndexNotFoundException infe = new IndexNotFoundException(expression); + infe.setResources("index_or_alias", expression); + return infe; + } + + private static IndexMetadata.State excludeState(IndicesOptions options) { + final IndexMetadata.State excludeState; + if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { + excludeState = null; + } else if (options.expandWildcardsOpen() && options.expandWildcardsClosed() == false) { + excludeState = IndexMetadata.State.CLOSE; + } else if (options.expandWildcardsClosed() && options.expandWildcardsOpen() == false) { + excludeState = IndexMetadata.State.OPEN; + } else { + assert false : "this shouldn't get called if wildcards expand to none"; + excludeState = null; + } + return excludeState; + } + + public static Map matches(Context context, Metadata metadata, String expression) { + if (Regex.isMatchAllPattern(expression)) { + return filterIndicesLookup(context, metadata.getIndicesLookup(), null, context.getOptions()); + } else if (expression.indexOf("*") == expression.length() - 1) { + return suffixWildcard(context, metadata, expression); + } else { + return otherWildcard(context, metadata, expression); + } + } + + private static Map suffixWildcard(Context context, Metadata metadata, String expression) { + assert expression.length() >= 2 : "expression [" + expression + "] should have at least a length of 2"; + String fromPrefix = expression.substring(0, expression.length() - 1); + char[] toPrefixCharArr = fromPrefix.toCharArray(); + toPrefixCharArr[toPrefixCharArr.length - 1]++; + String toPrefix = new String(toPrefixCharArr); + SortedMap subMap = metadata.getIndicesLookup().subMap(fromPrefix, toPrefix); + return filterIndicesLookup(context, subMap, null, context.getOptions()); + } + + private static Map otherWildcard(Context context, Metadata metadata, String expression) { + final String pattern = expression; + return filterIndicesLookup( + context, + metadata.getIndicesLookup(), + e -> Regex.simpleMatch(pattern, e.getKey()), + context.getOptions() + ); + } + + private static Map filterIndicesLookup( + Context context, + SortedMap indicesLookup, + Predicate> filter, + IndicesOptions options + ) { + boolean shouldConsumeStream = false; + Stream> stream = indicesLookup.entrySet().stream(); + if (options.ignoreAliases()) { + shouldConsumeStream = true; + stream = stream.filter(e -> e.getValue().getType() != IndexAbstraction.Type.ALIAS); + } + if (filter != null) { + shouldConsumeStream = true; + stream = stream.filter(filter); + } + if (context.includeDataStreams() == false) { + shouldConsumeStream = true; + stream = stream.filter(e -> e.getValue().getType() != 
IndexAbstraction.Type.DATA_STREAM); + } + if (shouldConsumeStream) { + return stream.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } else { + return indicesLookup; + } + } + + private static Set expand( + Context context, + IndexMetadata.State excludeState, + Map matches, + String expression, + boolean includeHidden + ) { + Set expand = new HashSet<>(); + for (Map.Entry entry : matches.entrySet()) { + String aliasOrIndexName = entry.getKey(); + IndexAbstraction indexAbstraction = entry.getValue(); + + if (indexAbstraction.isHidden() == false || includeHidden || implicitHiddenMatch(aliasOrIndexName, expression)) { + if (context.isPreserveAliases() && indexAbstraction.getType() == IndexAbstraction.Type.ALIAS) { + expand.add(aliasOrIndexName); + } else { + for (IndexMetadata meta : indexAbstraction.getIndices()) { + if (excludeState == null || meta.getState() != excludeState) { + expand.add(meta.getIndex().getName()); + } + } + if (context.isPreserveDataStreams() && indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { + expand.add(indexAbstraction.getName()); + } + } + } + } + return expand; + } + + private static boolean implicitHiddenMatch(String itemName, String expression) { + return itemName.startsWith(".") && expression.startsWith(".") && Regex.isSimpleMatchPattern(expression); + } + + private boolean isEmptyOrTrivialWildcard(List expressions) { + return expressions.isEmpty() + || (expressions.size() == 1 && (Metadata.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); + } + + private static List resolveEmptyOrTrivialWildcard(IndicesOptions options, Metadata metadata) { + if (options.expandWildcardsOpen() && options.expandWildcardsClosed() && options.expandWildcardsHidden()) { + return Arrays.asList(metadata.getConcreteAllIndices()); + } else if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { + return Arrays.asList(metadata.getConcreteVisibleIndices()); + } else if (options.expandWildcardsOpen() && options.expandWildcardsHidden()) { + return Arrays.asList(metadata.getConcreteAllOpenIndices()); + } else if (options.expandWildcardsOpen()) { + return Arrays.asList(metadata.getConcreteVisibleOpenIndices()); + } else if (options.expandWildcardsClosed() && options.expandWildcardsHidden()) { + return Arrays.asList(metadata.getConcreteAllClosedIndices()); + } else if (options.expandWildcardsClosed()) { + return Arrays.asList(metadata.getConcreteVisibleClosedIndices()); + } else { + return Collections.emptyList(); + } + } + } + + /** + * A date math expression resolver. 
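+     * For example, with the default uuuu.MM.dd format an expression of the form &lt;logs-{now/d}&gt;
+     * resolves to a dated name such as logs-2023.04.21, relative to the context start time.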
+ * + * @opensearch.internal + */ + public static final class DateMathExpressionResolver implements ExpressionResolver { + + private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); + private static final String EXPRESSION_LEFT_BOUND = "<"; + private static final String EXPRESSION_RIGHT_BOUND = ">"; + private static final char LEFT_BOUND = '{'; + private static final char RIGHT_BOUND = '}'; + private static final char ESCAPE_CHAR = '\\'; + private static final char TIME_ZONE_BOUND = '|'; + + @Override + public List resolve(final Context context, List expressions) { + List result = new ArrayList<>(expressions.size()); + for (String expression : expressions) { + result.add(resolveExpression(expression, context)); + } + return result; + } + + @SuppressWarnings("fallthrough") + String resolveExpression(String expression, final Context context) { + if (expression.startsWith(EXPRESSION_LEFT_BOUND) == false || expression.endsWith(EXPRESSION_RIGHT_BOUND) == false) { + return expression; + } + + boolean escape = false; + boolean inDateFormat = false; + boolean inPlaceHolder = false; + final StringBuilder beforePlaceHolderSb = new StringBuilder(); + StringBuilder inPlaceHolderSb = new StringBuilder(); + final char[] text = expression.toCharArray(); + final int from = 1; + final int length = text.length - 1; + for (int i = from; i < length; i++) { + boolean escapedChar = escape; + if (escape) { + escape = false; + } + + char c = text[i]; + if (c == ESCAPE_CHAR) { + if (escapedChar) { + beforePlaceHolderSb.append(c); + escape = false; + } else { + escape = true; + } + continue; + } + if (inPlaceHolder) { + switch (c) { + case LEFT_BOUND: + if (inDateFormat && escapedChar) { + inPlaceHolderSb.append(c); + } else if (!inDateFormat) { + inDateFormat = true; + inPlaceHolderSb.append(c); + } else { + throw new OpenSearchParseException( + "invalid dynamic name expression [{}]." + " invalid character in placeholder at position [{}]", + new String(text, from, length), + i + ); + } + break; + + case RIGHT_BOUND: + if (inDateFormat && escapedChar) { + inPlaceHolderSb.append(c); + } else if (inDateFormat) { + inDateFormat = false; + inPlaceHolderSb.append(c); + } else { + String inPlaceHolderString = inPlaceHolderSb.toString(); + int dateTimeFormatLeftBoundIndex = inPlaceHolderString.indexOf(LEFT_BOUND); + String mathExpression; + String dateFormatterPattern; + DateFormatter dateFormatter; + final ZoneId timeZone; + if (dateTimeFormatLeftBoundIndex < 0) { + mathExpression = inPlaceHolderString; + dateFormatter = DEFAULT_DATE_FORMATTER; + timeZone = ZoneOffset.UTC; + } else { + if (inPlaceHolderString.lastIndexOf(RIGHT_BOUND) != inPlaceHolderString.length() - 1) { + throw new OpenSearchParseException( + "invalid dynamic name expression [{}]. missing closing `}`" + " for date math format", + inPlaceHolderString + ); + } + if (dateTimeFormatLeftBoundIndex == inPlaceHolderString.length() - 2) { + throw new OpenSearchParseException( + "invalid dynamic name expression [{}]. 
missing date format", + inPlaceHolderString + ); + } + mathExpression = inPlaceHolderString.substring(0, dateTimeFormatLeftBoundIndex); + String patternAndTZid = inPlaceHolderString.substring( + dateTimeFormatLeftBoundIndex + 1, + inPlaceHolderString.length() - 1 + ); + int formatPatternTimeZoneSeparatorIndex = patternAndTZid.indexOf(TIME_ZONE_BOUND); + if (formatPatternTimeZoneSeparatorIndex != -1) { + dateFormatterPattern = patternAndTZid.substring(0, formatPatternTimeZoneSeparatorIndex); + timeZone = DateUtils.of(patternAndTZid.substring(formatPatternTimeZoneSeparatorIndex + 1)); + } else { + dateFormatterPattern = patternAndTZid; + timeZone = ZoneOffset.UTC; + } + dateFormatter = DateFormatter.forPattern(dateFormatterPattern); + } + + DateFormatter formatter = dateFormatter.withZone(timeZone); + DateMathParser dateMathParser = formatter.toDateMathParser(); + Instant instant = dateMathParser.parse(mathExpression, context::getStartTime, false, timeZone); + + String time = formatter.format(instant); + beforePlaceHolderSb.append(time); + inPlaceHolderSb = new StringBuilder(); + inPlaceHolder = false; + } + break; + + default: + inPlaceHolderSb.append(c); + } + } else { + switch (c) { + case LEFT_BOUND: + if (escapedChar) { + beforePlaceHolderSb.append(c); + } else { + inPlaceHolder = true; + } + break; + + case RIGHT_BOUND: + if (!escapedChar) { + throw new OpenSearchParseException( + "invalid dynamic name expression [{}]." + + " invalid character at position [{}]. `{` and `}` are reserved characters and" + + " should be escaped when used as part of the index name using `\\` (e.g. `\\{text\\}`)", + new String(text, from, length), + i + ); + } + default: + beforePlaceHolderSb.append(c); + } + } + } + + if (inPlaceHolder) { + throw new OpenSearchParseException( + "invalid dynamic name expression [{}]. 
date math placeholder is open ended", + new String(text, from, length) + ); + } + if (beforePlaceHolderSb.length() == 0) { + throw new OpenSearchParseException("nothing captured"); + } + return beforePlaceHolderSb.toString(); + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index c03f5ae619edf..57db883a337e8 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.LocalNodeClusterManagerListener; import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.NodeConnectionsService; +import org.opensearch.cluster.ProtobufClusterState; import org.opensearch.cluster.TimeoutClusterStateListener; import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.opensearch.cluster.node.DiscoveryNodes; @@ -114,6 +115,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements private final Map timeoutClusterStateListeners = new ConcurrentHashMap<>(); private final AtomicReference state; // last applied state + private final AtomicReference protobufState; // last applied state private final String nodeName; @@ -123,6 +125,7 @@ public ClusterApplierService(String nodeName, Settings settings, ClusterSettings this.clusterSettings = clusterSettings; this.threadPool = threadPool; this.state = new AtomicReference<>(); + this.protobufState = new AtomicReference<>(); this.nodeName = nodeName; this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); @@ -214,6 +217,17 @@ public ClusterState state() { return clusterState; } + /** + * The current cluster state. + * Should be renamed to appliedClusterState + */ + public ProtobufClusterState protobufState() { + assert assertNotCalledFromClusterStateApplier("the applied cluster state is not yet available"); + ProtobufClusterState clusterState = this.protobufState.get(); + assert clusterState != null : "initial cluster state not set yet"; + return clusterState; + } + /** * Returns true if the appliedClusterState is not null */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index e097803d86b48..385d62b1895cc 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.LocalNodeClusterManagerListener; import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.NodeConnectionsService; +import org.opensearch.cluster.ProtobufClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.RerouteService; @@ -170,6 +171,14 @@ public ClusterState state() { return clusterApplierService.state(); } + /** + * The currently applied cluster state. + * TODO: Should be renamed to appliedState / appliedClusterState + */ + public ProtobufClusterState protobufState() { + return clusterApplierService.protobufState(); + } + /** * Adds a high priority applier of updated cluster states. 
*/ diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index c052af41d66d9..c9ed9d59cda05 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -58,6 +58,9 @@ import org.opensearch.tasks.RawTaskStatus; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportInterceptor; +import org.opensearch.transport.ProtobufTransportRequest; +import org.opensearch.transport.ProtobufTransportRequestHandler; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; @@ -131,6 +134,7 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); private final List transportIntercetors = new ArrayList<>(); + private final List protobufTransportInterceptors = new ArrayList<>(); /** * Creates a network module that custom networking classes can be plugged into. @@ -300,4 +304,48 @@ public AsyncSender interceptSender(AsyncSender sender) { } } + /** + * Registers a new {@link TransportInterceptor} + */ + private void registerProtobufTransportInterceptor(ProtobufTransportInterceptor interceptor) { + this.protobufTransportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); + } + + /** + * Returns a composite {@link ProtobufTransportInterceptor} containing all registered interceptors + * @see #registerProtobufTransportInterceptor(ProtobufTransportInterceptor) + */ + public ProtobufTransportInterceptor getProtobufTransportInterceptor() { + return new ProtobufCompositeTransportInterceptor(this.protobufTransportInterceptors); + } + + static final class ProtobufCompositeTransportInterceptor implements ProtobufTransportInterceptor { + final List transportInterceptors; + + private ProtobufCompositeTransportInterceptor(List transportInterceptors) { + this.transportInterceptors = new ArrayList<>(transportInterceptors); + } + + @Override + public ProtobufTransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + ProtobufTransportRequestHandler actualHandler + ) { + for (ProtobufTransportInterceptor interceptor : this.transportInterceptors) { + actualHandler = interceptor.interceptHandler(action, executor, forceExecution, actualHandler); + } + return actualHandler; + } + + @Override + public AsyncSender interceptSender(AsyncSender sender) { + for (ProtobufTransportInterceptor interceptor : this.transportInterceptors) { + sender = interceptor.interceptSender(sender); + } + return sender; + } + } + } diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java index dd5502054684a..4308e1c3e98fa 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java @@ -233,6 +233,7 @@ public boolean registerDynamicSetting(Setting setting) { * the setting during startup. 
diff --git a/server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java b/server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java index 201b74d4a83c3..b17e0606eccd2 100644 --- a/server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java +++ b/server/src/main/java/org/opensearch/common/unit/ProtobufSizeValue.java @@ -46,6 +46,14 @@ public void writeTo(CodedOutputStream out) throws IOException { out.writeInt64NoTag(singles()); } + public long getSize() { + return size; + } + + public SizeUnit getSizeUnit() { + return sizeUnit; + } + public long singles() { return sizeUnit.toSingles(size); } diff --git a/server/src/main/java/org/opensearch/common/unit/SizeValue.java b/server/src/main/java/org/opensearch/common/unit/SizeValue.java index 766199ebbc8f8..7e499f5860a7f 100644 --- a/server/src/main/java/org/opensearch/common/unit/SizeValue.java +++ b/server/src/main/java/org/opensearch/common/unit/SizeValue.java @@ -72,6 +72,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(singles()); } + public long getSize() { + return size; + } + + public SizeUnit getSizeUnit() { + return sizeUnit; + } + public long singles() { return sizeUnit.toSingles(size); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index d8b51b0184a66..e77e82368bd9b 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -78,17 +78,22 @@ import org.opensearch.Version; import org.opensearch.action.ActionModule; import org.opensearch.action.ActionModule.DynamicActionRegistry; +import org.opensearch.action.ActionModule.ProtobufDynamicActionRegistry; import org.opensearch.action.ActionType; +import org.opensearch.action.ProtobufActionType; import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; import org.opensearch.action.search.SearchTransportService; +import org.opensearch.action.support.ProtobufTransportAction; import org.opensearch.action.support.TransportAction; import org.opensearch.action.update.UpdateHelper; import
org.opensearch.bootstrap.BootstrapCheck; import org.opensearch.bootstrap.BootstrapContext; import org.opensearch.client.Client; +import org.opensearch.client.ProtobufClient; import org.opensearch.client.node.NodeClient; +import org.opensearch.client.node.ProtobufNodeClient; import org.opensearch.cluster.ClusterInfoService; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; @@ -107,6 +112,7 @@ import org.opensearch.cluster.metadata.TemplateUpgradeService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.cluster.routing.BatchedRerouteService; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.DiskThresholdMonitor; @@ -134,6 +140,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; @@ -195,6 +202,7 @@ import org.opensearch.plugins.PersistentTaskPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginsService; +import org.opensearch.plugins.ProtobufActionPlugin; import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.plugins.ScriptPlugin; import org.opensearch.plugins.SearchPlugin; @@ -222,6 +230,8 @@ import org.opensearch.tasks.TaskResultsService; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportInterceptor; +import org.opensearch.transport.ProtobufTransportService; import org.opensearch.transport.RemoteClusterService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; @@ -376,9 +386,12 @@ public static class DiscoverySettings { private final PluginsService pluginsService; private final ExtensionsManager extensionsManager; private final NodeClient client; + private final ProtobufNodeClient protobufClient; private final Collection pluginLifecycleComponents; private final LocalNodeFactory localNodeFactory; + private final ProtobufLocalNodeFactory protobufLocalNodeFactory; private final NodeService nodeService; + private final ProtobufNodeService protobufNodeService; private final Tracer tracer; final NamedWriteableRegistry namedWriteableRegistry; private final AtomicReference runnableTaskListener; @@ -515,6 +528,7 @@ protected Node( ); resourcesToClose.add(nodeEnvironment); localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId()); + protobufLocalNodeFactory = new ProtobufLocalNodeFactory(settings, nodeEnvironment.nodeId()); final List> executorBuilders = pluginsService.getExecutorBuilders(settings); @@ -540,6 +554,19 @@ protected Node( } client = new NodeClient(settings, threadPool); + // final ProtobufThreadPool protobufThreadPool = new ProtobufThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ProtobufExecutorBuilder[0])); + // resourcesToClose.add(() -> ProtobufThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); + // final ResourceWatcherService resourceWatcherServiceProtobuf = new ResourceWatcherService(settings, protobufThreadPool); + // resourcesToClose.add(resourceWatcherServiceProtobuf); + // // adds the context to 
the DeprecationLogger so that it does not need to be injected everywhere + // HeaderWarning.setThreadContext(protobufThreadPool.getThreadContext()); + // resourcesToClose.add(() -> HeaderWarning.removeThreadContext(protobufThreadPool.getThreadContext())); + + // for (final ProtobufExecutorBuilder builder : protobufThreadPool.builders()) { + // additionalSettings.addAll(builder.getRegisteredSettings()); + // } + protobufClient = new ProtobufNodeClient(settings, threadPool); + final ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class)); final ScriptService scriptService = newScriptService(settings, scriptModule.engines, scriptModule.contexts); AnalysisModule analysisModule = new AnalysisModule(this.environment, pluginsService.filterPlugins(AnalysisPlugin.class)); @@ -799,24 +826,42 @@ protected Node( ) .collect(Collectors.toList()); - ActionModule actionModule = new ActionModule( + // ActionModule actionModule = new ActionModule( + // settings, + // clusterModule.getIndexNameExpressionResolver(), + // settingsModule.getIndexScopedSettings(), + // settingsModule.getClusterSettings(), + // settingsModule.getSettingsFilter(), + // threadPool, + // pluginsService.filterPlugins(ActionPlugin.class), + // client, + // circuitBreakerService, + // usageService, + // systemIndices + // ); + // modules.add(actionModule); + + ActionModule protobufActionModule = new ActionModule( settings, clusterModule.getIndexNameExpressionResolver(), + clusterModule.getProtobufIndexNameExpressionResolver(), settingsModule.getIndexScopedSettings(), settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(), threadPool, pluginsService.filterPlugins(ActionPlugin.class), client, + pluginsService.filterPlugins(ProtobufActionPlugin.class), + protobufClient, circuitBreakerService, usageService, systemIndices, identityService, extensionsManager ); - modules.add(actionModule); + modules.add(protobufActionModule); - final RestController restController = actionModule.getRestController(); + final RestController restController = protobufActionModule.getRestController(); final NetworkModule networkModule = new NetworkModule( settings, @@ -861,16 +906,27 @@ protected Node( settingsModule.getClusterSettings(), taskHeaders ); + final ProtobufTransportService protobufTransportService = newProtobufTransportService( + settings, + transport, + threadPool, + networkModule.getProtobufTransportInterceptor(), + protobufLocalNodeFactory, + settingsModule.getClusterSettings(), + taskHeaders + ); TopNSearchTasksLogger taskConsumer = new TopNSearchTasksLogger(settings, settingsModule.getClusterSettings()); transportService.getTaskManager().registerTaskResourceConsumer(taskConsumer); - this.extensionsManager.initializeServicesAndRestHandler( - actionModule, - settingsModule, - transportService, - clusterService, - environment.settings(), - client - ); + // if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + // this.extensionsManager.initializeServicesAndRestHandler( + // actionModule, + // settingsModule, + // transportService, + // clusterService, + // environment.settings(), + // client + // ); + // } final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService( @@ -919,7 +975,7 @@ protected Node( clusterModule.getIndexNameExpressionResolver(), repositoryService, transportService, - 
actionModule.getActionFilters() + protobufActionModule.getActionFilters() ); SnapshotShardsService snapshotShardsService = new SnapshotShardsService( settings, @@ -933,7 +989,7 @@ protected Node( clusterService, transportService, snapshotShardsService, - actionModule.getActionFilters() + protobufActionModule.getActionFilters() ); RestoreService restoreService = new RestoreService( clusterService, @@ -1015,6 +1071,51 @@ protected Node( fileCache, taskCancellationMonitoringService ); + this.protobufNodeService = new ProtobufNodeService( + settings, + threadPool, + monitorService, + discoveryModule.getDiscovery(), + protobufTransportService, + indicesService, + pluginsService, + circuitBreakerService, + scriptService, + httpServerTransport, + ingestService, + clusterService, + settingsModule.getSettingsFilter(), + responseCollectorService, + searchTransportService, + indexingPressureService, + searchModule.getValuesSourceRegistry().getUsageService(), + searchBackpressureService, + searchPipelineService, + fileCache + ); final SearchService searchService = newSearchService( clusterService, @@ -1068,10 +1169,13 @@ protected Node( modules.add(b -> { b.bind(Node.class).toInstance(this); b.bind(NodeService.class).toInstance(nodeService); + b.bind(ProtobufNodeService.class).toInstance(protobufNodeService); b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); b.bind(PluginsService.class).toInstance(pluginsService); b.bind(Client.class).toInstance(client); b.bind(NodeClient.class).toInstance(client); + b.bind(ProtobufClient.class).toInstance(protobufClient); + b.bind(ProtobufNodeClient.class).toInstance(protobufClient); b.bind(Environment.class).toInstance(this.environment); b.bind(ExtensionsManager.class).toInstance(this.extensionsManager); b.bind(ThreadPool.class).toInstance(threadPool); @@ -1104,6 +1208,7 @@ protected Node( .toInstance(new SearchPhaseController(namedWriteableRegistry, searchService::aggReduceContextBuilder)); b.bind(Transport.class).toInstance(transport); b.bind(TransportService.class).toInstance(transportService); + b.bind(ProtobufTransportService.class).toInstance(protobufTransportService); b.bind(NetworkService.class).toInstance(networkService); b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService)); b.bind(MetadataIndexUpgradeService.class).toInstance(metadataIndexUpgradeService); @@ -1167,7 +1272,7 @@ protected Node( resourcesToClose.addAll(pluginLifecycleComponents); resourcesToClose.add(injector.getInstance(PeerRecoverySourceService.class)); this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); - DynamicActionRegistry dynamicActionRegistry = actionModule.getDynamicActionRegistry(); + DynamicActionRegistry dynamicActionRegistry = protobufActionModule.getDynamicActionRegistry(); dynamicActionRegistry.registerUnmodifiableActionMap(injector.getInstance(new Key>() { })); client.initialize( @@ -1178,8 +1283,19 @@ protected Node( );
this.namedWriteableRegistry = namedWriteableRegistry; + ProtobufDynamicActionRegistry protobufDynamicActionRegistry = protobufActionModule.getProtobufDynamicActionRegistry(); + protobufDynamicActionRegistry.registerUnmodifiableActionMap(injector.getInstance(new Key>() { + })); + protobufClient.initialize( + protobufDynamicActionRegistry, + () -> clusterService.localNode().getId(), + protobufTransportService.getRemoteClusterService(), + namedWriteableRegistry + ); + logger.debug("initializing HTTP handlers ..."); - actionModule.initRestHandlers(() -> clusterService.state().nodes()); + protobufActionModule.initRestHandlers(() -> clusterService.state().nodes()); + protobufActionModule.initProtobufRestHandlers(); logger.info("initialized"); success = true; @@ -1204,6 +1320,18 @@ protected TransportService newTransportService( return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); } + protected ProtobufTransportService newProtobufTransportService( + Settings settings, + Transport transport, + ThreadPool threadPool, + ProtobufTransportInterceptor interceptor, + Function localNodeFactory, + ClusterSettings clusterSettings, + Set taskHeaders + ) { + return new ProtobufTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + } + protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) { // Noop in production, overridden by tests } @@ -1721,6 +1849,28 @@ DiscoveryNode getNode() { } } + private static class ProtobufLocalNodeFactory implements Function { + private final SetOnce localNode = new SetOnce<>(); + private final String persistentNodeId; + private final Settings settings; + + private ProtobufLocalNodeFactory(Settings settings, String persistentNodeId) { + this.persistentNodeId = persistentNodeId; + this.settings = settings; + } + + @Override + public ProtobufDiscoveryNode apply(ProtobufBoundTransportAddress boundTransportAddress) { + localNode.set(ProtobufDiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + return localNode.get(); + } + + ProtobufDiscoveryNode getNode() { + assert localNode.get() != null; + return localNode.get(); + } + } + /** * Initializes the search cache with a defined capacity. * The capacity of the cache is based on user configuration for {@link Node#NODE_SEARCH_CACHE_SIZE_SETTING}. diff --git a/server/src/main/java/org/opensearch/node/ProtobufNodeService.java b/server/src/main/java/org/opensearch/node/ProtobufNodeService.java new file mode 100644 index 0000000000000..b774178bb8122 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ProtobufNodeService.java @@ -0,0 +1,238 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.node; + +import org.opensearch.cluster.routing.WeightedRoutingStats; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodeInfo; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodeStats; +import org.opensearch.action.admin.indices.stats.ProtobufCommonStatsFlags; +import org.opensearch.action.search.SearchTransportService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.discovery.Discovery; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.ingest.IngestService; +import org.opensearch.monitor.MonitorService; +import org.opensearch.plugins.PluginsService; +import org.opensearch.script.ScriptService; +import org.opensearch.search.aggregations.support.AggregationUsageService; +import org.opensearch.search.backpressure.SearchBackpressureService; +import org.opensearch.search.pipeline.SearchPipelineService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportService; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * Services exposed to nodes +* +* @opensearch.internal +*/ +public class ProtobufNodeService implements Closeable { + private final Settings settings; + private final ThreadPool threadPool; + private final MonitorService monitorService; + private final ProtobufTransportService transportService; + private final IndicesService indicesService; + private final PluginsService pluginService; + private final CircuitBreakerService circuitBreakerService; + private final IngestService ingestService; + private final SettingsFilter settingsFilter; + private final ScriptService scriptService; + private final HttpServerTransport httpServerTransport; + private final ResponseCollectorService responseCollectorService; + private final SearchTransportService searchTransportService; + private final IndexingPressureService indexingPressureService; + private final AggregationUsageService aggregationUsageService; + private final SearchBackpressureService searchBackpressureService; + private final SearchPipelineService searchPipelineService; + private final ClusterService clusterService; + private final Discovery discovery; + private final FileCache fileCache; + + ProtobufNodeService( + Settings settings, + ThreadPool threadPool, + MonitorService monitorService, + Discovery discovery, + ProtobufTransportService transportService, + IndicesService indicesService, + PluginsService pluginService, + CircuitBreakerService circuitBreakerService, + ScriptService scriptService, + @Nullable HttpServerTransport httpServerTransport, + IngestService ingestService, + ClusterService clusterService, + SettingsFilter settingsFilter, + ResponseCollectorService responseCollectorService, + SearchTransportService searchTransportService, + IndexingPressureService indexingPressureService, + AggregationUsageService aggregationUsageService, + SearchBackpressureService searchBackpressureService, + SearchPipelineService searchPipelineService, + FileCache 
fileCache + ) { + this.settings = settings; + this.threadPool = threadPool; + this.monitorService = monitorService; + this.transportService = transportService; + this.indicesService = indicesService; + this.discovery = discovery; + this.pluginService = pluginService; + this.circuitBreakerService = circuitBreakerService; + this.httpServerTransport = httpServerTransport; + this.ingestService = ingestService; + this.settingsFilter = settingsFilter; + this.scriptService = scriptService; + this.responseCollectorService = responseCollectorService; + this.searchTransportService = searchTransportService; + this.indexingPressureService = indexingPressureService; + this.aggregationUsageService = aggregationUsageService; + this.searchBackpressureService = searchBackpressureService; + this.searchPipelineService = searchPipelineService; + this.clusterService = clusterService; + this.fileCache = fileCache; + clusterService.addStateApplier(ingestService); + clusterService.addStateApplier(searchPipelineService); + } + + public ProtobufNodeInfo info( + boolean settings, + boolean os, + boolean process, + boolean jvm, + boolean threadPool, + boolean transport, + boolean http, + boolean plugin, + boolean ingest, + boolean aggs, + boolean indices, + boolean searchPipeline + ) { + ProtobufNodeInfo.Builder builder = ProtobufNodeInfo.builder(Version.CURRENT, Build.CURRENT, transportService.getLocalNode()); + if (settings) { + builder.setSettings(settingsFilter.filter(this.settings)); + } + // if (os) { + // builder.setOs(monitorService.osService().info()); + // } + // if (process) { + // builder.setProcess(monitorService.processService().info()); + // } + // if (jvm) { + // builder.setJvm(monitorService.jvmService().info()); + // } + if (threadPool) { + builder.setThreadPool(this.threadPool.protobufInfo()); + } + if (transport) { + builder.setTransport(transportService.protobufInfo()); + } + // if (http && httpServerTransport != null) { + // builder.setHttp(httpServerTransport.info()); + // } + // if (plugin && pluginService != null) { + // builder.setPlugins(pluginService.info()); + // } + // if (ingest && ingestService != null) { + // builder.setIngest(ingestService.info()); + // } + // if (aggs && aggregationUsageService != null) { + // builder.setAggsInfo(aggregationUsageService.info()); + // } + if (indices) { + builder.setTotalIndexingBuffer(indicesService.getTotalIndexingBufferBytes()); + } + // if (searchPipeline && searchPipelineService != null) { + // builder.setSearchPipelineInfo(searchPipelineService.info()); + // } + return builder.build(); + } + + public ProtobufNodeStats stats( + ProtobufCommonStatsFlags indices, + boolean os, + boolean process, + boolean jvm, + boolean threadPool, + boolean fs, + boolean transport, + boolean http, + boolean circuitBreaker, + boolean script, + boolean discoveryStats, + boolean ingest, + boolean adaptiveSelection, + boolean scriptCache, + boolean indexingPressure, + boolean shardIndexingPressure, + boolean searchBackpressure, + boolean clusterManagerThrottling, + boolean weightedRoutingStats, + boolean fileCacheStats + ) { + // for indices stats we want to include previous allocated shards stats as well (it will + // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) + return new ProtobufNodeStats( + transportService.getLocalNode(), + System.currentTimeMillis(), + null, + null, + null, + null, + threadPool ? this.threadPool.protobufStats() : null, + null, + transport ? 
transportService.stats() : null, + null, + null, + null, + null, + null, + null + ); + } + + public IngestService getIngestService() { + return ingestService; + } + + public MonitorService getMonitorService() { + return monitorService; + } + + public SearchBackpressureService getSearchBackpressureService() { + return searchBackpressureService; + } + + @Override + public void close() throws IOException { + IOUtils.close(indicesService); + } + + /** + * Wait for the node to be effectively closed. + * @see IndicesService#awaitClose(long, TimeUnit) + */ + public boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException { + return indicesService.awaitClose(timeout, timeUnit); + } + +} diff --git a/server/src/main/java/org/opensearch/node/ProtobufReportingService.java b/server/src/main/java/org/opensearch/node/ProtobufReportingService.java index 9d9f507ce8cb3..d6e54f245ea95 100644 --- a/server/src/main/java/org/opensearch/node/ProtobufReportingService.java +++ b/server/src/main/java/org/opensearch/node/ProtobufReportingService.java @@ -16,7 +16,7 @@ * @opensearch.internal */ public interface ProtobufReportingService { - I info(); + I protobufInfo(); /** * Information interface. diff --git a/server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java b/server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java new file mode 100644 index 0000000000000..caed963482a31 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java @@ -0,0 +1,205 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.plugins; + +import org.opensearch.action.ProtobufActionType; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.RequestValidators; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.opensearch.action.support.ProtobufActionFilter; +import org.opensearch.action.support.ProtobufTransportAction; +import org.opensearch.action.support.TransportActions; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.Strings; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.rest.RestController; +import org.opensearch.rest.ProtobufRestHandler; +import org.opensearch.rest.RestHeaderDefinition; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; + +/** + * An additional extension point for {@link Plugin}s that extends OpenSearch's action functionality. Implement it like this: +*

{@code
+*   {@literal @}Override
+*   public List> getActions() {
+*       return Arrays.asList(new ActionHandler<>(ReindexAction.INSTANCE, TransportReindexAction.class),
+*               new ActionHandler<>(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class),
+*               new ActionHandler<>(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class),
+*               new ActionHandler<>(RethrottleAction.INSTANCE, TransportRethrottleAction.class));
+*   }
+* }
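+* </pre>
+* A protobuf-flavored registration would follow the same shape, only with the protobuf action
+* types (a hedged sketch; {@code MyProtobufAction} and {@code TransportMyProtobufAction} are
+* hypothetical names, not part of this change):
+* <pre>{@code
+*   {@literal @}Override
+*   public List> getActions() {
+*       return Collections.singletonList(new ActionHandler<>(MyProtobufAction.INSTANCE, TransportMyProtobufAction.class));
+*   }
+* }</pre>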
+* +* @opensearch.api +*/ +public interface ProtobufActionPlugin { + /** + * Actions added by this plugin. + */ + default List> getActions() { + return Collections.emptyList(); + } + + /** + * Client actions added by this plugin. This defaults to all of the {@linkplain ProtobufActionType} in + * {@linkplain ProtobufActionPlugin#getActions()}. + */ + default List> getClientActions() { + return getActions().stream().map(a -> a.action).collect(Collectors.toList()); + } + + /** + * ProtobufActionType filters added by this plugin. + */ + default List getActionFilters() { + return Collections.emptyList(); + } + + /** + * Rest handlers added by this plugin. + */ + default List getRestHandlers( + Settings settings, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster + ) { + return Collections.emptyList(); + } + + /** + * Returns headers which should be copied through rest requests on to internal requests. + */ + default Collection getRestHeaders() { + return Collections.emptyList(); + } + + /** + * Returns headers which should be copied from internal requests into tasks. + */ + default Collection getTaskHeaders() { + return Collections.emptyList(); + } + + /** + * Returns a function used to wrap each rest request before handling the request. + * The returned {@link UnaryOperator} is called for every incoming rest request and receives + * the original rest handler as its input. This allows adding arbitrary functionality around + * rest request handlers to do for instance logging or authentication. + * A simple example of how to only allow GET requests is here: +    *<pre>
+    * {@code
+    *    UnaryOperator getRestHandlerWrapper(ThreadContext threadContext) {
+    *      return originalHandler -> (ProtobufRestHandler) (request, channel, client) -> {
+    *        if (request.method() != Method.GET) {
+    *          throw new IllegalStateException("only GET requests are allowed");
+    *        }
+    *        originalHandler.handleRequest(request, channel, client);
+    *      };
+    *    }
+    * }
+    * 
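+    * Another sketch in the same shape (hypothetical header name, purely illustrative):
+    * <pre>{@code
+    *    UnaryOperator getRestHandlerWrapper(ThreadContext threadContext) {
+    *      return originalHandler -> (ProtobufRestHandler) (request, channel, client) -> {
+    *        if (request.header("X-My-Auth") == null) {
+    *          throw new IllegalStateException("missing X-My-Auth header");
+    *        }
+    *        originalHandler.handleRequest(request, channel, client);
+    *      };
+    *    }
+    * }</pre>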
+ * + * Note: Only one installed plugin may implement a rest wrapper. */ + default UnaryOperator getRestHandlerWrapper(ThreadContext threadContext) { + return null; + } + + /** + * Class responsible for handling Transport Actions + * + * @opensearch.internal + */ + final class ActionHandler { + private final ProtobufActionType action; + private final Class> transportAction; + private final Class[] supportTransportActions; + + /** + * Create a record of an action, the {@linkplain ProtobufTransportAction} that handles it, and any supporting {@linkplain TransportActions} + * that are needed by that {@linkplain ProtobufTransportAction}. + */ + public ActionHandler( + ProtobufActionType action, + Class> transportAction, + Class... supportTransportActions + ) { + this.action = action; + this.transportAction = transportAction; + this.supportTransportActions = supportTransportActions; + } + + public ProtobufActionType getAction() { + return action; + } + + public Class> getTransportAction() { + return transportAction; + } + + public Class[] getSupportTransportActions() { + return supportTransportActions; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder().append(action.name()).append(" is handled by ").append(transportAction.getName()); + if (supportTransportActions.length > 0) { + b.append('[').append(Strings.arrayToCommaDelimitedString(supportTransportActions)).append(']'); + } + return b.toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != ActionHandler.class) { + return false; + } + ActionHandler other = (ActionHandler) obj; + return Objects.equals(action, other.action) + && Objects.equals(transportAction, other.transportAction) + && Objects.deepEquals(supportTransportActions, other.supportTransportActions); + } + + @Override + public int hashCode() { + return Objects.hash(action, transportAction, supportTransportActions); + } + } + + /** + * Returns a collection of validators that are used by {@link RequestValidators} to validate a + * {@link org.opensearch.action.admin.indices.mapping.put.PutMappingRequest} before executing it. + */ + default Collection> mappingRequestValidators() { + return Collections.emptyList(); + } + + default Collection> indicesAliasesRequestValidators() { + return Collections.emptyList(); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/MethodHandlers.java b/server/src/main/java/org/opensearch/rest/MethodHandlers.java index 8c29bf2e66036..7907b5fc3b041 100644 --- a/server/src/main/java/org/opensearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/opensearch/rest/MethodHandlers.java @@ -47,15 +47,26 @@ final class MethodHandlers { private final String path; private final Map methodHandlers; + private final Map protobufMethodHandlers; MethodHandlers(String path, RestHandler handler, RestRequest.Method... methods) { this.path = path; this.methodHandlers = new HashMap<>(methods.length); + this.protobufMethodHandlers = new HashMap<>(methods.length); for (RestRequest.Method method : methods) { methodHandlers.put(method, handler); } } + MethodHandlers(String path, ProtobufRestHandler handler, RestRequest.Method... methods) { + this.path = path; + this.methodHandlers = new HashMap<>(methods.length); + this.protobufMethodHandlers = new HashMap<>(methods.length); + for (RestRequest.Method method : methods) { + protobufMethodHandlers.put(method, handler); + } + } + /** * Add a handler for an additional array of methods.
Note that {@code MethodHandlers} * does not allow replacing the handler for an already existing method. @@ -70,6 +81,20 @@ MethodHandlers addMethods(RestHandler handler, RestRequest.Method... methods) { return this; } + /** + * Add a handler for an additional array of methods. Note that {@code MethodHandlers} + * does not allow replacing the handler for an already existing method. + */ + MethodHandlers addProtobufMethods(ProtobufRestHandler handler, RestRequest.Method... methods) { + for (RestRequest.Method method : methods) { + ProtobufRestHandler existing = protobufMethodHandlers.putIfAbsent(method, handler); + if (existing != null) { + throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); + } + } + return this; + } + /** * Returns the handler for the given method or {@code null} if none exists. */ @@ -78,6 +103,14 @@ RestHandler getHandler(RestRequest.Method method) { return methodHandlers.get(method); } + /** + * Returns the handler for the given method or {@code null} if none exists. + */ + @Nullable + ProtobufRestHandler getProtobufHandler(RestRequest.Method method) { + return protobufMethodHandlers.get(method); + } + /** * Return a set of all valid HTTP methods for the particular path */ diff --git a/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java b/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java index 7224847832d5b..c585dd52b7c03 100644 --- a/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java @@ -23,7 +23,7 @@ * @opensearch.api */ @FunctionalInterface -public interface ProtobufRestHandler extends ClientAgnosticRestHandler { +public interface ProtobufRestHandler { /** * Handles a rest request. diff --git a/server/src/main/java/org/opensearch/rest/RestBaseHandler.java b/server/src/main/java/org/opensearch/rest/RestBaseHandler.java new file mode 100644 index 0000000000000..ba51a918e2427 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/RestBaseHandler.java @@ -0,0 +1,189 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.rest; + +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.core.xcontent.XContent; +import org.opensearch.rest.RestRequest.Method; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Handler for REST requests +* +* @opensearch.api +*/ +@FunctionalInterface +public interface RestBaseHandler { + + /** + * Handles a rest request. + * @param request The request to handle + * @param channel The channel to write the request response to + * @param client A client to use to make internal requests on behalf of the original request + */ + void handleRequest(RestRequest request, RestChannel channel, T client) throws Exception; + + default boolean canTripCircuitBreaker() { + return true; + } + + /** + * Indicates if the RestHandler supports content as a stream. A stream would be multiple objects delineated by + * {@link XContent#streamSeparator()}. If a handler returns true this will affect the types of content that can be sent to + * this endpoint. + */ + default boolean supportsContentStream() { + return false; + } + + /** + * Indicates if the RestHandler supports working with pooled buffers. 
If the request handler will not escape the return + * {@link RestRequest#content()} or any buffers extracted from it then there is no need to make copies of any pooled buffers in the + * {@link RestRequest} instance before passing a request to this handler. If this instance does not support pooled/unsafe buffers + * {@link RestRequest#ensureSafeBuffers()} should be called on any request before passing it to {@link #handleRequest}. + * + * @return true iff the handler supports requests that make use of pooled buffers + */ + default boolean allowsUnsafeBuffers() { + return false; + } + + // /** + // * The list of {@link Route}s that this RestHandler is responsible for handling. + // */ + // default List routes() { + // return Collections.emptyList(); + // } + + /** + * A list of routes handled by this RestHandler that are deprecated and do not have a direct + * replacement. If changing the {@code path} or {@code method} of a route, + * use {@link #replacedRoutes()}. + */ + default List deprecatedRoutes() { + return Collections.emptyList(); + } + + /** + * A list of routes handled by this RestHandler that have had their {@code path} and/or + * {@code method} changed. The pre-existing {@code route} will be registered + * as deprecated alongside the updated {@code route}. + */ + default List replacedRoutes() { + return Collections.emptyList(); + } + + /** + * Route for the request. + * + * @opensearch.internal + */ + class Route { + + private final String path; + private final Method method; + + public Route(Method method, String path) { + this.path = path; + this.method = method; + } + + public String getPath() { + return path; + } + + public Method getMethod() { + return method; + } + } + + /** + * Represents an API that has been deprecated and is slated for removal. + */ + class DeprecatedRoute extends Route { + + private final String deprecationMessage; + + public DeprecatedRoute(Method method, String path, String deprecationMessage) { + super(method, path); + this.deprecationMessage = deprecationMessage; + } + + public String getDeprecationMessage() { + return deprecationMessage; + } + } + + /** + * Represents an API that has had its {@code path} or {@code method} changed. Holds both the + * new and previous {@code path} and {@code method} combination.
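+ * A small usage sketch (hypothetical paths, not part of this change):
+ * <pre>{@code
+ * // GET /_example_new replaces GET /_example_old; the old route stays registered as deprecated
+ * ReplacedRoute route = new ReplacedRoute(Method.GET, "/_example_new", Method.GET, "/_example_old");
+ * }</pre>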
+ */ + class ReplacedRoute extends Route { + + private final String deprecatedPath; + private final Method deprecatedMethod; + + /** + * Construct replaced routes using new and deprecated methods and new and deprecated paths + * @param method route method + * @param path new route path + * @param deprecatedMethod deprecated method + * @param deprecatedPath deprecated path + */ + public ReplacedRoute(Method method, String path, Method deprecatedMethod, String deprecatedPath) { + super(method, path); + this.deprecatedMethod = deprecatedMethod; + this.deprecatedPath = deprecatedPath; + } + + /** + * Construct replaced routes using route method, new and deprecated paths + * This constructor can be used when both new and deprecated paths use the same method + * @param method route method + * @param path new route path + * @param deprecatedPath deprecated path + */ + public ReplacedRoute(Method method, String path, String deprecatedPath) { + this(method, path, method, deprecatedPath); + } + + /** + * Construct replaced routes using route, new and deprecated prefixes + * @param route route + * @param prefix new route prefix + * @param deprecatedPrefix deprecated prefix + */ + public ReplacedRoute(Route route, String prefix, String deprecatedPrefix) { + this(route.getMethod(), prefix + route.getPath(), deprecatedPrefix + route.getPath()); + } + + public String getDeprecatedPath() { + return deprecatedPath; + } + + public Method getDeprecatedMethod() { + return deprecatedMethod; + } + } + + /** + * Construct replaced routes using routes template and prefixes for new and deprecated paths + * @param routes routes + * @param prefix new prefix + * @param deprecatedPrefix deprecated prefix + * @return new list of API routes prefixed with the prefix string + */ + static List replaceRoutes(List routes, final String prefix, final String deprecatedPrefix) { + return routes.stream().map(route -> new ReplacedRoute(route, prefix, deprecatedPrefix)).collect(Collectors.toList()); + } +} diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index 1a8bddc094d26..9710ab5497c36 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.client.node.NodeClient; +import org.opensearch.client.node.ProtobufNodeClient; import org.opensearch.common.Nullable; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.bytes.BytesArray; @@ -110,8 +111,12 @@ public class RestController implements HttpServerTransport.Dispatcher { private final UnaryOperator handlerWrapper; + private final UnaryOperator handlerProtobufWrapper; + private final NodeClient client; + private final ProtobufNodeClient protobufClient; + private final CircuitBreakerService circuitBreakerService; /** Rest headers that are copied to internal requests made during a rest request.
*/ @@ -133,7 +138,9 @@ public RestController( handlerWrapper = h -> h; // passthrough if no wrapper set } this.handlerWrapper = handlerWrapper; + this.handlerProtobufWrapper = null; this.client = client; + this.protobufClient = null; this.circuitBreakerService = circuitBreakerService; this.identityService = identityService; registerHandlerNoWrap( @@ -143,6 +150,35 @@ ); } + public RestController( + Set headersToCopy, + UnaryOperator handlerWrapper, + NodeClient client, + UnaryOperator handlerProtobufWrapper, + ProtobufNodeClient protobufNodeClient, + CircuitBreakerService circuitBreakerService, + UsageService usageService, + IdentityService identityService + ) { + this.headersToCopy = headersToCopy; + this.usageService = usageService; + if (handlerProtobufWrapper == null) { + handlerProtobufWrapper = h -> h; // passthrough if no wrapper set + } + if (handlerWrapper == null) { + handlerWrapper = h -> h; // passthrough if no wrapper set + } + this.handlerProtobufWrapper = handlerProtobufWrapper; + this.protobufClient = protobufNodeClient; + this.client = client; + this.handlerWrapper = handlerWrapper; + this.circuitBreakerService = circuitBreakerService; + this.identityService = identityService; + registerProtobufHandlerNoWrap( + RestRequest.Method.GET, + "/favicon.ico", + (request, channel, clnt) -> channel.sendResponse(new BytesRestResponse(RestStatus.OK, "image/x-icon", FAVICON_RESPONSE)) + ); + } + /** * Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request. * @@ -217,6 +253,13 @@ protected void registerHandler(RestRequest.Method method, String path, RestHandl registerHandlerNoWrap(method, path, handlerWrapper.apply(handler)); } + protected void registerProtobufHandler(RestRequest.Method method, String path, ProtobufRestHandler handler) { + if (handler instanceof ProtobufBaseRestHandler) { + usageService.addProtobufRestHandler((ProtobufBaseRestHandler) handler); + } + registerProtobufHandlerNoWrap(method, path, handlerProtobufWrapper.apply(handler)); + } + private void registerHandlerNoWrap(RestRequest.Method method, String path, RestHandler maybeWrappedHandler) { handlers.insertOrUpdate( path, @@ -225,11 +268,19 @@ private void registerHandlerNoWrap(RestRequest.Method method, String path, RestH ); } + private void registerProtobufHandlerNoWrap(RestRequest.Method method, String path, ProtobufRestHandler maybeWrappedHandler) { + handlers.insertOrUpdate( + path, + new MethodHandlers(path, maybeWrappedHandler, method), + (mHandlers, newMHandler) -> mHandlers.addProtobufMethods(maybeWrappedHandler, method) + ); + } + /** * Registers a REST handler with the controller. The REST handler declares the {@code method} * and {@code path} combinations. */ - public void registerHandler(final RestHandler restHandler) { + public void registerHandler(final RestHandler restHandler) { restHandler.routes().forEach(route -> registerHandler(route.getMethod(), route.getPath(), restHandler)); restHandler.deprecatedRoutes() .forEach(route -> registerAsDeprecatedHandler(route.getMethod(), route.getPath(), restHandler, route.getDeprecationMessage())); @@ -245,8 +296,31 @@ public void registerHandler(final RestHandler restHandler) { ); } + /** + * Registers a REST handler with the controller. The REST handler declares the {@code method} + * and {@code path} combinations.
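+ * A hedged wiring sketch ({@code myProtobufHandler} stands for any {@code ProtobufRestHandler}
+ * whose route paths contain "protobuf", since the dispatch logic below keys off that substring):
+ * <pre>{@code
+ * restController.registerProtobufHandler(myProtobufHandler);
+ * }</pre>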
+ */ + public void registerProtobufHandler(final ProtobufRestHandler restHandler) { + restHandler.routes().forEach(route -> registerProtobufHandler(route.getMethod(), route.getPath(), restHandler)); + // restHandler.deprecatedRoutes() + // .forEach(route -> registerAsDeprecatedHandler(route.getMethod(), route.getPath(), restHandler, route.getDeprecationMessage())); + // restHandler.replacedRoutes() + // .forEach( + // route -> registerWithDeprecatedHandler( + // route.getMethod(), + // route.getPath(), + // restHandler, + // route.getDeprecatedMethod(), + // route.getDeprecatedPath() + // ) + // ); + } + @Override public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { try { tryAllHandlers(request, channel, threadContext); } catch (Exception e) { @@ -325,6 +399,98 @@ private void dispatchRequest(RestRequest request, RestChannel channel, RestHandl } } + private void dispatchProtobufRequest(RestRequest request, RestChannel channel, ProtobufRestHandler handler) throws Exception { + final int contentLength = request.content().length(); + if (contentLength > 0) { + final MediaType mediaType = request.getMediaType(); + if (mediaType == null) { + sendContentTypeErrorMessage(request.getAllHeaderValues("Content-Type"), channel); + return; + } + if (handler.supportsContentStream() && mediaType != XContentType.JSON && mediaType != XContentType.SMILE) { + channel.sendResponse( + BytesRestResponse.createSimpleErrorResponse( + channel, + RestStatus.NOT_ACCEPTABLE, + "Content-Type [" + mediaType + "] does not support stream parsing. Use JSON or SMILE instead" + ) + ); + return; + } + } + RestChannel responseChannel = channel; + try { + if (handler.canTripCircuitBreaker()) { + inFlightRequestsBreaker(circuitBreakerService).addEstimateBytesAndMaybeBreak(contentLength, ""); + } else { + inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(contentLength); + } + // iff we could reserve bytes for the request we need to send the response also over this channel + responseChannel = new ResourceHandlingHttpChannel(channel, circuitBreakerService, contentLength); + // TODO: Count requests double in the circuit breaker if they need copying? + if (handler.allowsUnsafeBuffers() == false) { + request.ensureSafeBuffers(); + } + if (handler.allowSystemIndexAccessByDefault() == false && request.header(OPENSEARCH_PRODUCT_ORIGIN_HTTP_HEADER) == null) { + // The OPENSEARCH_PRODUCT_ORIGIN_HTTP_HEADER indicates that the request is coming from an OpenSearch product with a plan + // to move away from direct access to system indices, and thus deprecation warnings should not be emitted. + // This header is intended for internal use only.
+ protobufClient.threadPool().getThreadContext().putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, Boolean.FALSE.toString()); + } + + handler.handleRequest(request, responseChannel, protobufClient); + } catch (Exception e) { + responseChannel.sendResponse(new BytesRestResponse(responseChannel, e)); + } + } + private boolean handleNoHandlerFound(String rawPath, RestRequest.Method method, String uri, RestChannel channel) { // Get the map of matching handlers for a request, for the full set of HTTP methods.
final Set validMethodSet = getValidHandlerMethodSet(rawPath); @@ -356,6 +522,8 @@ private void sendContentTypeErrorMessage(@Nullable List contentTypeHeade } private void tryAllHandlers(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) throws Exception { for (final RestHeaderDefinition restHeader : headersToCopy) { final String name = restHeader.getName(); final List headerValues = request.getAllHeaderValues(name); @@ -387,6 +555,9 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel final String rawPath = request.rawPath(); final String uri = request.uri(); final RestRequest.Method requestMethod; try { // Resolves the HTTP method and fails if the method is invalid requestMethod = request.method(); @@ -394,23 +565,40 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel Iterator allHandlers = getAllHandlers(request.params(), rawPath); while (allHandlers.hasNext()) { final RestHandler handler; + final ProtobufRestHandler protobufHandler; final MethodHandlers handlers = allHandlers.next(); if (handlers == null) { handler = null; + protobufHandler = null; } else { - handler = handlers.getHandler(requestMethod); + if (rawPath.contains("protobuf")) { + handler = null; + protobufHandler = handlers.getProtobufHandler(requestMethod); + } else { + protobufHandler = null; + handler = handlers.getHandler(requestMethod); + } } - if (handler == null) { + if (handler == null && protobufHandler == null) { if (handleNoHandlerFound(rawPath, requestMethod, uri, channel)) { return; } } else { + if (rawPath.contains("protobuf")) { + dispatchProtobufRequest(request, channel, protobufHandler); + } else { if (FeatureFlags.isEnabled(FeatureFlags.IDENTITY)) { if (!handleAuthenticateUser(request, channel)) { return; } } - dispatchRequest(request, channel, handler); + dispatchRequest(request, channel, handler); + } return; } } @@ -423,6 +611,9 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel } Iterator getAllHandlers(@Nullable Map requestParamsRef, String rawPath) { final Supplier> paramsSupplier; if (requestParamsRef == null) { paramsSupplier = () -> null; diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 5b1583f260da0..7832649e8ad32 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -47,7 +47,7 @@ * @opensearch.api */ @FunctionalInterface -public interface RestHandler extends ClientAgnosticRestHandler { +public interface RestHandler { /** * Handles a rest request.
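A minimal end-to-end sketch of a protobuf REST handler (hypothetical class and route, not part of this patch; it assumes ProtobufRestHandler keeps the handleRequest(RestRequest, RestChannel, ProtobufNodeClient) shape and the routes() hook consumed by RestController above — note the dispatch logic only consults protobuf handlers when the raw path contains "protobuf"):

import static java.util.Collections.singletonList;
import static org.opensearch.rest.RestRequest.Method.GET;

import java.util.List;

import org.opensearch.client.node.ProtobufNodeClient;
import org.opensearch.rest.BytesRestResponse;
import org.opensearch.rest.ProtobufRestHandler;
import org.opensearch.rest.RestChannel;
import org.opensearch.rest.RestRequest;
import org.opensearch.rest.RestStatus;

public class ProtobufRestPingAction implements ProtobufRestHandler {

    @Override
    public List<Route> routes() {
        // the path deliberately contains "protobuf" so the dispatch logic above routes it here
        return singletonList(new Route(GET, "/_ping_protobuf"));
    }

    @Override
    public void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception {
        // plain-text response; a real handler would typically consult the client
        channel.sendResponse(new BytesRestResponse(RestStatus.OK, "pong"));
    }
}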
diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java new file mode 100644 index 0000000000000..ec432fe6e64e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java @@ -0,0 +1,58 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.rest.action.cat; + +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.rest.ProtobufBaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestStatus; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Base _cat API endpoint +* +* @opensearch.api +*/ +public class ProtobufRestCatAction extends ProtobufBaseRestHandler { + + private static final String CAT = "=^.^="; + private static final String CAT_NL = CAT + "\n"; + private final String HELP; + + public ProtobufRestCatAction(List catActions) { + StringBuilder sb = new StringBuilder(); + sb.append(CAT_NL); + for (ProtobufAbstractCatAction catAction : catActions) { + catAction.documentation(sb); + } + HELP = sb.toString(); + } + + @Override + public List routes() { + return singletonList(new Route(GET, "/_cat_protobuf")); + } + + @Override + public String getName() { + return "cat_protobuf_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final ProtobufNodeClient client) throws IOException { + return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.OK, HELP)); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java index ae0924641a16a..93db24d9ee7d6 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java @@ -70,17 +70,17 @@ public class ProtobufRestNodesAction extends ProtobufAbstractCatAction { @Override public List routes() { - return singletonList(new Route(GET, "/_cat/nodes")); + return singletonList(new Route(GET, "/_cat/nodes_protobuf")); } @Override public String getName() { - return "cat_nodes_action"; + return "cat_nodes_protobuf_action"; } @Override protected void documentation(StringBuilder sb) { - sb.append("/_cat/nodes\n"); + sb.append("/_cat/nodes_protobuf\n"); } @Override diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java index 61e80ad7f242c..5a716bf2a4032 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java @@ -19,7 +19,7 @@ import org.opensearch.action.support.ProtobufChannelActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.cluster.node.ProtobufDiscoveryNode; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ProtobufEmptyTransportResponseHandler; import
org.opensearch.transport.ProtobufTransportChannel; import org.opensearch.transport.ProtobufTransportException; @@ -41,14 +41,14 @@ public class ProtobufTaskCancellationService { public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban"; private static final Logger logger = LogManager.getLogger(ProtobufTaskCancellationService.class); private final ProtobufTransportService transportService; - private final ProtobufTaskManager taskManager; + private final TaskManager taskManager; public ProtobufTaskCancellationService(ProtobufTransportService transportService) { this.transportService = transportService; this.taskManager = transportService.getTaskManager(); transportService.registerRequestHandler( BAN_PARENT_ACTION_NAME, - ProtobufThreadPool.Names.SAME, + ThreadPool.Names.SAME, BanParentTaskRequest::new, new BanParentRequestHandler() ); @@ -64,11 +64,11 @@ void cancelTaskAndDescendants(ProtobufCancellableTask task, String reason, boole logger.trace("cancelling task [{}] and its descendants", taskId); StepListener completedListener = new StepListener<>(); GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.map(completedListener, r -> null), 3); - Collection childrenNodes = taskManager.startBanOnChildrenNodes(task.getId(), () -> { + Collection childrenNodes = taskManager.startBanOnChildrenNodesProtobuf(task.getId(), () -> { logger.trace("child tasks of parent [{}] are completed", taskId); groupedListener.onResponse(null); }); - taskManager.cancel(task, reason, () -> { + taskManager.cancelProtobufTask(task, reason, () -> { logger.trace("task [{}] is cancelled", taskId); groupedListener.onResponse(null); }); @@ -92,9 +92,9 @@ void cancelTaskAndDescendants(ProtobufCancellableTask task, String reason, boole } else { logger.trace("task [{}] doesn't have any children that should be cancelled", taskId); if (waitForCompletion) { - taskManager.cancel(task, reason, () -> listener.onResponse(null)); + taskManager.cancelProtobufTask(task, reason, () -> listener.onResponse(null)); } else { - taskManager.cancel(task, reason, () -> {}); + taskManager.cancelProtobufTask(task, reason, () -> {}); listener.onResponse(null); } } @@ -123,7 +123,7 @@ private void setBanOnNodes( node, BAN_PARENT_ACTION_NAME, banRequest, - new ProtobufEmptyTransportResponseHandler(ProtobufThreadPool.Names.SAME) { + new ProtobufEmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override public void handleResponse(ProtobufTransportResponse.Empty response) { logger.trace("sent ban for tasks with the parent [{}] to the node [{}]", taskId, node); @@ -151,7 +151,7 @@ private void removeBanOnNodes(ProtobufCancellableTask task, Collection childTasks = taskManager.setBan(request.parentTaskId, request.reason); + final List childTasks = taskManager.setBanProtobuf(request.parentTaskId, request.reason); final GroupedActionListener listener = new GroupedActionListener<>( ActionListener.map( new ProtobufChannelActionListener<>(channel, BAN_PARENT_ACTION_NAME, request), @@ -236,7 +236,7 @@ public void messageReceived(final BanParentTaskRequest request, final ProtobufTr listener.onResponse(null); } else { logger.debug("Removing ban for the parent [{}] on the node [{}]", request.parentTaskId, localNodeId()); - taskManager.removeBan(request.parentTaskId); + taskManager.removeBanProtobuf(request.parentTaskId); channel.sendResponse(ProtobufTransportResponse.Empty.INSTANCE); } } diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java 
b/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java index a6e53ebabdd9e..b53958eb290d3 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java @@ -36,7 +36,7 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpChannel; import java.io.IOException; @@ -85,7 +85,7 @@ public class ProtobufTaskManager implements ProtobufClusterStateApplier { * Rest headers that are copied to the task */ private final List taskHeaders; - private final ProtobufThreadPool threadPool; + private final ThreadPool threadPool; private final ConcurrentMapLong tasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); @@ -111,7 +111,7 @@ public class ProtobufTaskManager implements ProtobufClusterStateApplier { public static ProtobufTaskManager createTaskManagerWithClusterSettings( Settings settings, ClusterSettings clusterSettings, - ProtobufThreadPool threadPool, + ThreadPool threadPool, Set taskHeaders ) { final ProtobufTaskManager taskManager = new ProtobufTaskManager(settings, threadPool, taskHeaders); @@ -119,7 +119,7 @@ public static ProtobufTaskManager createTaskManagerWithClusterSettings( return taskManager; } - public ProtobufTaskManager(Settings settings, ProtobufThreadPool threadPool, Set taskHeaders) { + public ProtobufTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { this.threadPool = threadPool; this.taskHeaders = new ArrayList<>(taskHeaders); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); @@ -479,7 +479,7 @@ public Collection startBanOnChildrenNodes(long taskId, Ru } @Override - public void applyClusterState(ProtobufClusterChangedEvent event) { + public void applyProtobufClusterState(ProtobufClusterChangedEvent event) { lastDiscoveryNodes = event.state().getNodes(); if (event.nodesRemoved()) { synchronized (banedParents) { diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java index 8c165e9237047..3e97e5c6c678e 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java @@ -22,7 +22,7 @@ import org.opensearch.common.util.concurrent.ConcurrentMapLong; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.threadpool.RunnableTaskExecutionListener; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.lang.management.ManagementFactory; import java.util.ArrayList; @@ -53,11 +53,11 @@ public class ProtobufTaskResourceTrackingService implements RunnableTaskExecutio private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections .newConcurrentMapLongWithAggressiveConcurrency(); private final List taskCompletionListeners = new ArrayList<>(); - private final ProtobufThreadPool threadPool; + private final ThreadPool threadPool; private volatile boolean taskResourceTrackingEnabled; @Inject - public ProtobufTaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ProtobufThreadPool threadPool) { + public 
ProtobufTaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings); this.threadPool = threadPool; clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled); diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index ec59a13dd1999..484ca84501be9 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -42,10 +42,15 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; import org.opensearch.action.NotifyOnceListener; +import org.opensearch.action.ProtobufActionResponse; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateApplier; +import org.opensearch.cluster.ProtobufClusterChangedEvent; +import org.opensearch.cluster.ProtobufClusterStateApplier; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNodes; import org.opensearch.common.SetOnce; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -61,6 +66,9 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpChannel; +import com.carrotsearch.hppc.ObjectIntHashMap; +import com.carrotsearch.hppc.ObjectIntMap; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -78,6 +86,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; @@ -87,7 +96,7 @@ * * @opensearch.internal */ -public class TaskManager implements ClusterStateApplier { +public class TaskManager implements ClusterStateApplier, ProtobufClusterStateApplier { private static final Logger logger = LogManager.getLogger(TaskManager.class); @@ -109,25 +118,34 @@ public class TaskManager implements ClusterStateApplier { private final ThreadPool threadPool; private final ConcurrentMapLong tasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final ConcurrentMapLong protobufTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); private final ConcurrentMapLong cancellableTasks = ConcurrentCollections .newConcurrentMapLongWithAggressiveConcurrency(); + private final ConcurrentMapLong protobufCancellableTasks = ConcurrentCollections + .newConcurrentMapLongWithAggressiveConcurrency(); private final AtomicLong taskIdGenerator = new AtomicLong(); private final Map banedParents = new ConcurrentHashMap<>(); + private final Map banedParentsProtobuf = new ConcurrentHashMap<>(); private TaskResultsService taskResultsService; private final SetOnce taskResourceTrackingService = new SetOnce<>(); + private final SetOnce protobufTaskResourceTrackingService = new SetOnce<>(); private volatile DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES; + private volatile ProtobufDiscoveryNodes lastDiscoveryNodesProtobuf = ProtobufDiscoveryNodes.EMPTY_NODES; private final ByteSizeValue maxHeaderSize; 
private final Map channelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); + private final Map protobufChannelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); private final SetOnce cancellationService = new SetOnce<>(); + private final SetOnce protobufCancellationService = new SetOnce<>(); private volatile boolean taskResourceConsumersEnabled; private final Set> taskResourceConsumer; + private final Set> protobufTaskResourceConsumer; private final List taskEventListeners = new ArrayList<>(); public static TaskManager createTaskManagerWithClusterSettings( @@ -147,6 +165,7 @@ public TaskManager(Settings settings, ThreadPool threadPool, Set taskHea this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.taskResourceConsumersEnabled = TASK_RESOURCE_CONSUMERS_ENABLED.get(settings); taskResourceConsumer = new HashSet<>(); + protobufTaskResourceConsumer = new HashSet<>(); } /** @@ -166,6 +185,10 @@ public void registerTaskResourceConsumer(Consumer consumer) { taskResourceConsumer.add(consumer); } + public void registerProtobufTaskResourceConsumer(Consumer consumer) { + protobufTaskResourceConsumer.add(consumer); + } + public void setTaskResultsService(TaskResultsService taskResultsService) { assert this.taskResultsService == null; this.taskResultsService = taskResultsService; @@ -179,6 +202,14 @@ public void setTaskResourceTrackingService(TaskResourceTrackingService taskResou this.taskResourceTrackingService.set(taskResourceTrackingService); } + public void setTaskCancellationService(ProtobufTaskCancellationService taskCancellationService) { + this.protobufCancellationService.set(taskCancellationService); + } + + public void setTaskResourceTrackingService(ProtobufTaskResourceTrackingService taskResourceTrackingService) { + this.protobufTaskResourceTrackingService.set(taskResourceTrackingService); + } + public void setTaskResourceConsumersEnabled(boolean taskResourceConsumersEnabled) { this.taskResourceConsumersEnabled = taskResourceConsumersEnabled; } @@ -262,6 +293,85 @@ private void registerCancellableTask(Task task) { } } + /** + * Registers a task without parent task + */ + public ProtobufTask registerProtobuf(String type, String action, ProtobufTaskAwareRequest request) { + Map headers = new HashMap<>(); + long headerSize = 0; + long maxSize = maxHeaderSize.getBytes(); + ThreadContext threadContext = threadPool.getThreadContext(); + for (String key : taskHeaders) { + String httpHeader = threadContext.getHeader(key); + if (httpHeader != null) { + headerSize += key.length() * 2 + httpHeader.length() * 2; + if (headerSize > maxSize) { + throw new IllegalArgumentException("Request exceeded the maximum size of task headers " + maxHeaderSize); + } + headers.put(key, httpHeader); + } + } + ProtobufTask task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask(), headers); + Objects.requireNonNull(task); + assert task.getParentTaskId().equals(request.getParentTask()) : "Request [ " + request + "] didn't preserve its parentTaskId"; + if (logger.isTraceEnabled()) { + logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription()); + } + + if (task.supportsResourceTracking()) { + boolean success = task.addResourceTrackingCompletionListener(new NotifyOnceListener<>() { + @Override + protected void innerOnResponse(ProtobufTask task) { + // Stop tracking the task once the last thread has been marked inactive. + if (protobufTaskResourceTrackingService.get() != null && task.supportsResourceTracking()) { + protobufTaskResourceTrackingService.get().stopTracking(task); + } + } + + @Override + protected void innerOnFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + }); + + if (success == false) { + logger.debug( + "failed to register a completion listener as task resource tracking has already completed [taskId={}]", + task.getId() + ); + } + } + + if (task instanceof ProtobufCancellableTask) { + registerProtobufCancellableTask(task); + } else { + ProtobufTask previousTask = protobufTasks.put(task.getId(), task); + assert previousTask == null; + } + return task; + } + + private void registerProtobufCancellableTask(ProtobufTask task) { + ProtobufCancellableTask cancellableTask = (ProtobufCancellableTask) task; + ProtobufCancellableTaskHolder holder = new ProtobufCancellableTaskHolder(cancellableTask); + ProtobufCancellableTaskHolder oldHolder = protobufCancellableTasks.put(task.getId(), holder); + assert oldHolder == null; + // Check if this task was banned before we start it. The empty check is used to avoid + // computing the hash code of the parent taskId as most of the time banedParentsProtobuf is empty. + if (task.getParentTaskId().isSet() && banedParentsProtobuf.isEmpty() == false) { + String reason = banedParentsProtobuf.get(task.getParentTaskId()); + if (reason != null) { + try { + holder.cancel(reason); + throw new TaskCancelledException("ProtobufTask cancelled before it started: " + reason); + } finally { + // let's clean up the registration + unregisterProtobufTask(task); + } + } + } + } + /** * Cancels a task *

@@ -290,6 +400,51 @@ public void cancel(CancellableTask task, String reason, Runnable listener) { } } + /** + * Cancels a task + * <p> + * After starting cancellation on the parent task, the task manager tries to cancel all children tasks + * of the current task. Once cancellation of the children tasks is done, the listener is triggered. + * If the task is completed or unregistered from ProtobufTaskManager, then the listener is called immediately. + */ + public void cancelProtobufTask(ProtobufCancellableTask task, String reason, Runnable listener) { + ProtobufCancellableTaskHolder holder = protobufCancellableTasks.get(task.getId()); + List<Exception> exceptions = new ArrayList<>(); + for (TaskEventListeners taskEventListener : taskEventListeners) { + try { + taskEventListener.onTaskCancelled(task); + } catch (Exception e) { + exceptions.add(e); + } + } + // Throwing exception in case any of the cancellation listeners results in an exception. + // Should we just swallow such exceptions? + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); + if (holder != null) { + logger.trace("cancelling task with id {}", task.getId()); + holder.cancel(reason, listener); + } else { + listener.run(); + } + }
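Taken with the ProtobufTaskManager constructor changes earlier in this patch (which now take the shared ThreadPool), the register/cancel/unregister flow added in this hunk can be exercised roughly as follows. This is a hedged sketch only: the settings, clusterSettings, threadPool, and request objects are assumed to exist, and the type and action strings are illustrative:

    // Sketch only: createTaskManagerWithClusterSettings mirrors the signature shown
    // earlier in this patch; all inputs here are assumed, not part of the diff.
    ProtobufTaskManager taskManager = ProtobufTaskManager.createTaskManagerWithClusterSettings(
        settings,
        clusterSettings,
        threadPool, // the shared org.opensearch.threadpool.ThreadPool
        Set.of()    // task headers to copy
    );
    // Register a protobuf task; request is some ProtobufTaskAwareRequest implementation.
    ProtobufTask task = taskManager.registerProtobuf("transport", "indices:data/read/search", request);
    // ... run the task's work ...
    // Cancel a cancellable task; the callback fires once cancellation has started.
    if (task instanceof ProtobufCancellableTask) {
        taskManager.cancelProtobufTask((ProtobufCancellableTask) task, "example cancel", () -> {});
    }
    // Finally remove it from the manager (see unregisterProtobufTask below).
    taskManager.unregisterProtobufTask(task);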

/** * Unregister the task */ @@ -331,6 +486,38 @@ public Task unregister(Task task) { } } + /** + * Unregister the task + */ + public ProtobufTask unregisterProtobufTask(ProtobufTask task) { + logger.trace("unregister task for id: {}", task.getId()); + + // Decrement the task's self-thread as part of unregistration. + task.decrementResourceTrackingThreads(); + + if (taskResourceConsumersEnabled) { + for (Consumer taskConsumer : protobufTaskResourceConsumer) { + try { + taskConsumer.accept(task); + } catch (Exception e) { + logger.error("error encountered when updating the consumer", e); + } + } + } + + if (task instanceof ProtobufCancellableTask) { + ProtobufCancellableTaskHolder holder = protobufCancellableTasks.remove(task.getId()); + if (holder != null) { + holder.finish(); + return holder.getTask(); + } else { + return null; + } + } else { + return protobufTasks.remove(task.getId()); + } + } + /** * Register a node on which a child task will execute. The returned {@link Releasable} must be called * to unregister the child node once the child task is completed or failed. @@ -352,6 +539,27 @@ public DiscoveryNode localNode() { return lastDiscoveryNodes.getLocalNode(); } + /** + * Register a node on which a child task will execute. The returned {@link Releasable} must be called + * to unregister the child node once the child task is completed or failed.
+ */ + public Releasable registerProtobufChildNode(long taskId, ProtobufDiscoveryNode node) { + final ProtobufCancellableTaskHolder holder = protobufCancellableTasks.get(taskId); + if (holder != null) { + logger.trace("register child node [{}] task [{}]", node, taskId); + holder.registerChildNode(node); + return Releasables.releaseOnce(() -> { + logger.trace("unregister child node [{}] task [{}]", node, taskId); + holder.unregisterChildNode(node); + }); + } + return () -> {}; + } + + public ProtobufDiscoveryNode localProtobufNode() { + return lastDiscoveryNodesProtobuf.getLocalNode(); + } + /** * Stores the task failure */ @@ -384,6 +592,42 @@ public void onFailure(Exception e) { }); } + /** + * Stores the task failure + */ + public void storeResultProtobuf( + ProtobufTask task, + Exception error, + ActionListener listener + ) { + ProtobufDiscoveryNode localNode = lastDiscoveryNodesProtobuf.getLocalNode(); + if (localNode == null) { + // too early to store anything, shouldn't really be here - just pass the error along + listener.onFailure(error); + return; + } + final ProtobufTaskResult taskResult; + try { + taskResult = task.result(localNode, error); + } catch (IOException ex) { + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); + listener.onFailure(ex); + return; + } + taskResultsService.storeResult(taskResult, new ActionListener() { + @Override + public void onResponse(Void aVoid) { + listener.onFailure(error); + } + + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); + listener.onFailure(e); + } + }); + } + /** * Stores the task result */ @@ -418,6 +662,44 @@ public void onFailure(Exception e) { }); } + /** + * Stores the task result + */ + public void storeResultProtobuf( + ProtobufTask task, + Response response, + ActionListener listener + ) { + ProtobufDiscoveryNode localNode = lastDiscoveryNodesProtobuf.getLocalNode(); + if (localNode == null) { + // too early to store anything, shouldn't really be here - just pass the response along + logger.warn("couldn't store response {}, the node didn't join the cluster yet", response); + listener.onResponse(response); + return; + } + final ProtobufTaskResult taskResult; + try { + taskResult = task.result(localNode, response); + } catch (IOException ex) { + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex); + listener.onFailure(ex); + return; + } + + taskResultsService.storeResult(taskResult, new ActionListener() { + @Override + public void onResponse(Void aVoid) { + listener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), e); + listener.onFailure(e); + } + }); + } + /** * Returns the list of currently running tasks on the node */ @@ -429,6 +711,17 @@ public Map getTasks() { return Collections.unmodifiableMap(taskHashMap); } + /** + * Returns the list of currently running tasks on the node + */ + public Map getProtobufTasks() { + HashMap taskHashMap = new HashMap<>(this.protobufTasks); + for (ProtobufCancellableTaskHolder holder : protobufCancellableTasks.values()) { + taskHashMap.put(holder.getTask().getId(), holder.getTask()); + } + return Collections.unmodifiableMap(taskHashMap); + } + /** * Returns the list of currently running tasks on the node that can be cancelled */ @@ -440,6 +733,17 @@ public Map 
getCancellableTasks() { return Collections.unmodifiableMap(taskHashMap); } + /** + * Returns the list of currently running tasks on the node that can be cancelled + */ + public Map getProtobufCancellableTasks() { + HashMap taskHashMap = new HashMap<>(); + for (ProtobufCancellableTaskHolder holder : protobufCancellableTasks.values()) { + taskHashMap.put(holder.getTask().getId(), holder.getTask()); + } + return Collections.unmodifiableMap(taskHashMap); + } + /** * Returns a task with given id, or null if the task is not found. */ @@ -452,6 +756,18 @@ public Task getTask(long id) { } } + /** + * Returns a task with given id, or null if the task is not found. + */ + public ProtobufTask getProtobufTask(long id) { + ProtobufTask task = protobufTasks.get(id); + if (task != null) { + return task; + } else { + return getProtobufCancellableTask(id); + } + } + /** * Returns a cancellable task with given id, or null if the task is not found. */ @@ -464,6 +780,18 @@ public CancellableTask getCancellableTask(long id) { } } + /** + * Returns a cancellable task with given id, or null if the task is not found. + */ + public ProtobufCancellableTask getProtobufCancellableTask(long id) { + ProtobufCancellableTaskHolder holder = protobufCancellableTasks.get(id); + if (holder != null) { + return holder.getTask(); + } else { + return null; + } + } + /** * Returns the number of currently banned tasks. *

@@ -525,25 +853,109 @@ public Collection startBanOnChildrenNodes(long taskId, Runnable o } } - @Override - public void applyClusterState(ClusterChangedEvent event) { - lastDiscoveryNodes = event.state().getNodes(); - if (event.nodesRemoved()) { - synchronized (banedParents) { - lastDiscoveryNodes = event.state().getNodes(); - // Remove all bans that were registered by nodes that are no longer in the cluster state - Iterator banIterator = banedParents.keySet().iterator(); - while (banIterator.hasNext()) { - TaskId taskId = banIterator.next(); - if (lastDiscoveryNodes.nodeExists(taskId.getNodeId()) == false) { - logger.debug( - "Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", - taskId, - event.state().getNodes().getLocalNode() - ); - banIterator.remove(); - } - } + /** + * Returns the number of currently banned tasks. + *

+ * Will be used in task manager stats and for debugging. + */ + public int getBanCountProtobuf() { + return banedParentsProtobuf.size(); + } + + /** + * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. + *

+ * This method is called when a parent task that has children is cancelled. + * + * @return a list of pending cancellable child tasks + */ + public List setBanProtobuf(ProtobufTaskId parentTaskId, String reason) { + logger.trace("setting ban for the parent task {} {}", parentTaskId, reason); + + // Set the ban first, so the newly created tasks cannot be registered + synchronized (banedParentsProtobuf) { + if (lastDiscoveryNodesProtobuf.nodeExists(parentTaskId.getNodeId())) { + // Only set the ban if the node is the part of the cluster + banedParentsProtobuf.put(parentTaskId, reason); + } + } + return protobufCancellableTasks.values().stream().filter(t -> t.hasParent(parentTaskId)).map(t -> t.task).collect(Collectors.toList()); + } + + /** + * Removes the ban for the specified parent task. + *

+ * This method is called when a previously banned task is finally cancelled + */ + public void removeBanProtobuf(ProtobufTaskId parentTaskId) { + logger.trace("removing ban for the parent task {}", parentTaskId); + banedParentsProtobuf.remove(parentTaskId); + } + + // for testing + public Set getBannedTaskIdsProtobuf() { + return Collections.unmodifiableSet(banedParentsProtobuf.keySet()); + } + + /** + * Start rejecting new child requests as the parent task was cancelled. + * + * @param taskId the parent task id + * @param onChildTasksCompleted called when all child tasks are completed or failed + * @return the set of current nodes that have outstanding child tasks + */ + public Collection startBanOnChildrenNodesProtobuf(long taskId, Runnable onChildTasksCompleted) { + final ProtobufCancellableTaskHolder holder = protobufCancellableTasks.get(taskId); + if (holder != null) { + return holder.startBan(onChildTasksCompleted); + } else { + onChildTasksCompleted.run(); + return Collections.emptySet(); + } + } + + @Override + public void applyClusterState(ClusterChangedEvent event) { + lastDiscoveryNodes = event.state().getNodes(); + if (event.nodesRemoved()) { + synchronized (banedParents) { + lastDiscoveryNodes = event.state().getNodes(); + // Remove all bans that were registered by nodes that are no longer in the cluster state + Iterator banIterator = banedParents.keySet().iterator(); + while (banIterator.hasNext()) { + TaskId taskId = banIterator.next(); + if (lastDiscoveryNodes.nodeExists(taskId.getNodeId()) == false) { + logger.debug( + "Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", + taskId, + event.state().getNodes().getLocalNode() + ); + banIterator.remove(); + } + } + } + } + } + + @Override + public void applyProtobufClusterState(ProtobufClusterChangedEvent event) { + lastDiscoveryNodesProtobuf = event.state().getNodes(); + if (event.nodesRemoved()) { + synchronized (banedParentsProtobuf) { + lastDiscoveryNodesProtobuf = event.state().getNodes(); + // Remove all bans that were registered by nodes that are no longer in the cluster state + Iterator banIterator = banedParentsProtobuf.keySet().iterator(); + while (banIterator.hasNext()) { + ProtobufTaskId taskId = banIterator.next(); + if (lastDiscoveryNodesProtobuf.nodeExists(taskId.getNodeId()) == false) { + logger.debug( + "Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", + taskId, + event.state().getNodes().getLocalNode() + ); + banIterator.remove(); + } + } } } } @@ -565,6 +977,23 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); } + /** + * Blocks the calling thread, waiting for the task to vanish from the ProtobufTaskManager.
+ */ + public void waitForProtobufTaskCompletion(ProtobufTask task, long untilInNanos) { + while (System.nanoTime() - untilInNanos < 0) { + if (getProtobufTask(task.getId()) == null) { + return; + } + try { + Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); + } catch (InterruptedException e) { + throw new OpenSearchException("Interrupted waiting for completion of [{}]", e, task); + } + } + throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); + } + /** * Takes actions when a task is registered and its execution starts * * @param task getting executed. * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns */ @@ -577,6 +1006,18 @@ public ThreadContext.StoredContext taskExecutionStarted(Task task) { return taskResourceTrackingService.get().startTracking(task); } + /** + * Takes actions when a task is registered and its execution starts + * + * @param task getting executed. + * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns + */ + public ThreadContext.StoredContext protobufTaskExecutionStarted(ProtobufTask task) { + if (protobufTaskResourceTrackingService.get() == null) return () -> {}; + + return protobufTaskResourceTrackingService.get().startTracking(task); + } + private static class CancellableTaskHolder { private final CancellableTask task; private boolean finished = false; @@ -710,6 +1151,271 @@ Set startBan(Runnable onChildTasksCompleted) { } } + private static class ProtobufCancellableTaskHolder { + private final ProtobufCancellableTask task; + private boolean finished = false; + private List cancellationListeners = null; + private ObjectIntMap childTasksPerNode = null; + private boolean banChildren = false; + private List childTaskCompletedListeners = null; + + ProtobufCancellableTaskHolder(ProtobufCancellableTask task) { + this.task = task; + } + + void cancel(String reason, Runnable listener) { + final Runnable toRun; + synchronized (this) { + if (finished) { + assert cancellationListeners == null; + toRun = listener; + } else { + toRun = () -> {}; + if (listener != null) { + if (cancellationListeners == null) { + cancellationListeners = new ArrayList<>(); + } + cancellationListeners.add(listener); + } + } + } + try { + task.cancel(reason); + } finally { + if (toRun != null) { + toRun.run(); + } + } + } + + void cancel(String reason) { + task.cancel(reason); + } + + /** + * Marks task as finished.
+ */ + public void finish() { + final List listeners; + synchronized (this) { + this.finished = true; + if (cancellationListeners != null) { + listeners = cancellationListeners; + cancellationListeners = null; + } else { + listeners = Collections.emptyList(); + } + } + // We need to call the listener outside of the synchronized section to avoid potential bottlenecks + // in the listener synchronization + notifyListeners(listeners); + } + + private void notifyListeners(List listeners) { + assert Thread.holdsLock(this) == false; + Exception rootException = null; + for (Runnable listener : listeners) { + try { + listener.run(); + } catch (RuntimeException inner) { + rootException = ExceptionsHelper.useOrSuppress(rootException, inner); + } + } + ExceptionsHelper.reThrowIfNotNull(rootException); + } + + public boolean hasParent(ProtobufTaskId parentTaskId) { + return task.getParentTaskId().equals(parentTaskId); + } + + public ProtobufCancellableTask getTask() { + return task; + } + + synchronized void registerChildNode(ProtobufDiscoveryNode node) { + if (banChildren) { + throw new TaskCancelledException("The parent task was cancelled, shouldn't start any child tasks"); + } + if (childTasksPerNode == null) { + childTasksPerNode = new ObjectIntHashMap<>(); + } + childTasksPerNode.addTo(node, 1); + } + + void unregisterChildNode(ProtobufDiscoveryNode node) { + final List listeners; + synchronized (this) { + if (childTasksPerNode.addTo(node, -1) == 0) { + childTasksPerNode.remove(node); + } + if (childTasksPerNode.isEmpty() && this.childTaskCompletedListeners != null) { + listeners = childTaskCompletedListeners; + childTaskCompletedListeners = null; + } else { + listeners = Collections.emptyList(); + } + } + notifyListeners(listeners); + } + + Set startBan(Runnable onChildTasksCompleted) { + final Set pendingChildNodes; + final Runnable toRun; + synchronized (this) { + banChildren = true; + if (childTasksPerNode == null) { + pendingChildNodes = Collections.emptySet(); + } else { + pendingChildNodes = Set.copyOf(childTasksPerNode.keySet()); + } + if (pendingChildNodes.isEmpty()) { + assert childTaskCompletedListeners == null; + toRun = onChildTasksCompleted; + } else { + toRun = () -> {}; + if (childTaskCompletedListeners == null) { + childTaskCompletedListeners = new ArrayList<>(); + } + childTaskCompletedListeners.add(onChildTasksCompleted); + } + } + toRun.run(); + return pendingChildNodes; + } + } /** * Start tracking a cancellable task with its tcp channel, so if the channel gets closed we can get a set of * pending tasks associated that channel and cancel them as these results won't be retrieved by the parent task. */ @@ -800,4 +1506,100 @@ public void cancelTaskAndDescendants(CancellableTask task, String reason, boolea throw new IllegalStateException("TaskCancellationService is not initialized"); } } + + /** + * Start tracking a cancellable task with its tcp channel, so if the channel gets closed we can get a set of + * pending tasks associated with that channel and cancel them as these results won't be retrieved by the parent task.
+ * + * @return a releasable that should be called when this pending task is completed + */ + public Releasable startProtobufTrackingCancellableChannelTask(TcpChannel channel, ProtobufCancellableTask task) { + assert protobufCancellableTasks.containsKey(task.getId()) : "task [" + task.getId() + "] is not registered yet"; + final ProtobufChannelPendingTaskTracker tracker = protobufChannelPendingTaskTrackers.compute(channel, (k, curr) -> { + if (curr == null) { + curr = new ProtobufChannelPendingTaskTracker(); + } + curr.addTask(task); + return curr; + }); + if (tracker.registered.compareAndSet(false, true)) { + channel.addCloseListener(ActionListener.wrap(r -> { + final ProtobufChannelPendingTaskTracker removedTracker = protobufChannelPendingTaskTrackers.remove(channel); + assert removedTracker == tracker; + cancelProtobufTasksOnChannelClosed(tracker.drainTasks()); + }, e -> { assert false : new AssertionError("must not be here", e); })); + } + return () -> tracker.removeTask(task); + } + + // for testing + final int numberOfProtobufChannelPendingTaskTrackers() { + return protobufChannelPendingTaskTrackers.size(); + } + + private static class ProtobufChannelPendingTaskTracker { + final AtomicBoolean registered = new AtomicBoolean(); + final Semaphore permits = Assertions.ENABLED ? new Semaphore(Integer.MAX_VALUE) : null; + final Set pendingTasks = ConcurrentCollections.newConcurrentSet(); + + void addTask(ProtobufCancellableTask task) { + assert permits.tryAcquire() : "tracker was drained"; + final boolean added = pendingTasks.add(task); + assert added : "task " + task.getId() + " is in the pending list already"; + assert releasePermit(); + } + + boolean acquireAllPermits() { + permits.acquireUninterruptibly(Integer.MAX_VALUE); + return true; + } + + boolean releasePermit() { + permits.release(); + return true; + } + + Set drainTasks() { + assert acquireAllPermits(); // do not release permits so we can't add tasks to this tracker after draining + return Collections.unmodifiableSet(pendingTasks); + } + + void removeTask(ProtobufCancellableTask task) { + final boolean removed = pendingTasks.remove(task); + assert removed : "task " + task.getId() + " is not in the pending list"; + } + } + + private void cancelProtobufTasksOnChannelClosed(Set tasks) { + if (tasks.isEmpty() == false) { + threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn("failed to cancel tasks on channel closed", e); + } + + @Override + protected void doRun() { + for (ProtobufCancellableTask task : tasks) { + cancelProtobufTaskAndDescendants(task, "channel was closed", false, ActionListener.wrap(() -> {})); + } + } + }); + } + } + + public void cancelProtobufTaskAndDescendants( + ProtobufCancellableTask task, + String reason, + boolean waitForCompletion, + ActionListener listener + ) { + final ProtobufTaskCancellationService service = protobufCancellationService.get(); + if (service != null) { + service.cancelTaskAndDescendants(task, reason, waitForCompletion, listener); + } else { + assert false : "ProtobufTaskCancellationService is not initialized"; + throw new IllegalStateException("ProtobufTaskCancellationService is not initialized"); + } + } }
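A hedged usage sketch of the channel tracking added above; the taskManager, channel, and task variables are assumed to exist, and the body of the try block stands in for the real request handling:

    // While the Releasable is open the task is tied to the channel: if the channel
    // closes first, cancelProtobufTasksOnChannelClosed cancels the pending task.
    try (Releasable ignored = taskManager.startProtobufTrackingCancellableChannelTask(channel, task)) {
        // execute the task's work here
    }

diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java deleted file mode 100644 index 3276e5029e6f2..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufExecutorBuilder.java +++ 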
/dev/null @@ -1,91 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. -*/ - -package org.opensearch.threadpool; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.common.util.concurrent.ThreadContext; - -import java.util.List; - -/** - * Base class for executor builders. -* -* @param the underlying type of the executor settings -* -* @opensearch.internal -*/ -public abstract class ProtobufExecutorBuilder { - - private final String name; - - public ProtobufExecutorBuilder(String name) { - this.name = name; - } - - protected String name() { - return name; - } - - protected static String settingsKey(final String prefix, final String key) { - return String.join(".", prefix, key); - } - - protected int applyHardSizeLimit(final Settings settings, final String name) { - if (name.equals("bulk") || name.equals(ThreadPool.Names.WRITE) || name.equals(ThreadPool.Names.SYSTEM_WRITE)) { - return 1 + OpenSearchExecutors.allocatedProcessors(settings); - } else { - return Integer.MAX_VALUE; - } - } - - /** - * The list of settings this builder will register. - * - * @return the list of registered settings - */ - public abstract List> getRegisteredSettings(); - - /** - * Return an executor settings object from the node-level settings. - * - * @param settings the node-level settings - * @return the executor settings object - */ - abstract U getSettings(Settings settings); - - /** - * Builds the executor with the specified executor settings. - * - * @param settings the executor settings - * @param threadContext the current thread context - * @return a new executor built from the specified executor settings - */ - abstract ProtobufThreadPool.ExecutorHolder build(U settings, ThreadContext threadContext); - - /** - * Format the thread pool info object for this executor. - * - * @param info the thread pool info object to format - * @return a formatted thread pool info (useful for logging) - */ - abstract String formatInfo(ProtobufThreadPool.Info info); - - abstract static class ExecutorSettings { - - protected final String nodeName; - - ExecutorSettings(String nodeName) { - this.nodeName = nodeName; - } - - } - -} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java deleted file mode 100644 index 6d5bce32533ab..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufFixedExecutorBuilder.java +++ /dev/null @@ -1,182 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. -*/ - -package org.opensearch.threadpool; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ProtobufSizeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.node.Node; - -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; - -/** - * A builder for fixed executors. 
-* -* @opensearch.internal -*/ -public final class ProtobufFixedExecutorBuilder extends ProtobufExecutorBuilder { - - private final Setting sizeSetting; - private final Setting queueSizeSetting; - - /** - * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name. - * - * @param settings the node-level settings - * @param name the name of the executor - * @param size the fixed number of threads - * @param queueSize the size of the backing queue, -1 for unbounded - */ - ProtobufFixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) { - this(settings, name, size, queueSize, false); - } - - /** - * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name. - * - * @param settings the node-level settings - * @param name the name of the executor - * @param size the fixed number of threads - * @param queueSize the size of the backing queue, -1 for unbounded - * @param deprecated whether or not the thread pool is deprecated - */ - ProtobufFixedExecutorBuilder( - final Settings settings, - final String name, - final int size, - final int queueSize, - final boolean deprecated - ) { - this(settings, name, size, queueSize, "thread_pool." + name, deprecated); - } - - /** - * Construct a fixed executor builder. - * - * @param settings the node-level settings - * @param name the name of the executor - * @param size the fixed number of threads - * @param queueSize the size of the backing queue, -1 for unbounded - * @param prefix the prefix for the settings keys - */ - public ProtobufFixedExecutorBuilder( - final Settings settings, - final String name, - final int size, - final int queueSize, - final String prefix - ) { - this(settings, name, size, queueSize, prefix, false); - } - - /** - * Construct a fixed executor builder. 
- * - * @param settings the node-level settings - * @param name the name of the executor - * @param size the fixed number of threads - * @param queueSize the size of the backing queue, -1 for unbounded - * @param prefix the prefix for the settings keys - * @param deprecated whether or not the thread pool is deprecated - */ - public ProtobufFixedExecutorBuilder( - final Settings settings, - final String name, - final int size, - final int queueSize, - final String prefix, - final boolean deprecated - ) { - super(name); - final String sizeKey = settingsKey(prefix, "size"); - final Setting.Property[] properties; - if (deprecated) { - properties = new Setting.Property[] { Setting.Property.NodeScope, Setting.Property.Deprecated }; - } else { - properties = new Setting.Property[] { Setting.Property.NodeScope }; - } - this.sizeSetting = new Setting<>( - sizeKey, - s -> Integer.toString(size), - s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), - properties - ); - final String queueSizeKey = settingsKey(prefix, "queue_size"); - this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, properties); - } - - @Override - public List> getRegisteredSettings() { - return Arrays.asList(sizeSetting, queueSizeSetting); - } - - @Override - FixedExecutorSettings getSettings(Settings settings) { - final String nodeName = Node.NODE_NAME_SETTING.get(settings); - final int size = sizeSetting.get(settings); - final int queueSize = queueSizeSetting.get(settings); - return new FixedExecutorSettings(nodeName, size, queueSize); - } - - @Override - ProtobufThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) { - int size = settings.size; - int queueSize = settings.queueSize; - final ThreadFactory threadFactory = OpenSearchExecutors.daemonThreadFactory( - OpenSearchExecutors.threadName(settings.nodeName, name()) - ); - final ExecutorService executor = OpenSearchExecutors.newFixed( - settings.nodeName + "/" + name(), - size, - queueSize, - threadFactory, - threadContext - ); - final ProtobufThreadPool.Info info = new ProtobufThreadPool.Info( - name(), - ProtobufThreadPool.ThreadPoolType.FIXED, - size, - size, - null, - queueSize < 0 ? null : new ProtobufSizeValue(queueSize) - ); - return new ProtobufThreadPool.ExecutorHolder(executor, info); - } - - @Override - String formatInfo(ProtobufThreadPool.Info info) { - return String.format( - Locale.ROOT, - "name [%s], size [%d], queue size [%s]", - info.getName(), - info.getMax(), - info.getQueueSize() == null ? "unbounded" : info.getQueueSize() - ); - } - - static class FixedExecutorSettings extends ProtobufExecutorBuilder.ExecutorSettings { - - private final int size; - private final int queueSize; - - FixedExecutorSettings(final String nodeName, final int size, final int queueSize) { - super(nodeName); - this.size = size; - this.queueSize = queueSize; - } - - } - -} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java deleted file mode 100644 index f95a13d2b6a82..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufResizableExecutorBuilder.java +++ /dev/null @@ -1,134 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. 
-*/ - -package org.opensearch.threadpool; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ProtobufSizeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.node.Node; - -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicReference; - -/** - * A builder for resizable executors. -* -* @opensearch.internal -*/ -public final class ProtobufResizableExecutorBuilder extends ProtobufExecutorBuilder< - ProtobufResizableExecutorBuilder.ResizableExecutorSettings> { - - private final Setting sizeSetting; - private final Setting queueSizeSetting; - private final AtomicReference runnableTaskListener; - - ProtobufResizableExecutorBuilder( - final Settings settings, - final String name, - final int size, - final int queueSize, - final AtomicReference runnableTaskListener - ) { - this(settings, name, size, queueSize, "thread_pool." + name, runnableTaskListener); - } - - public ProtobufResizableExecutorBuilder( - final Settings settings, - final String name, - final int size, - final int queueSize, - final String prefix, - final AtomicReference runnableTaskListener - ) { - super(name); - final String sizeKey = settingsKey(prefix, "size"); - this.sizeSetting = new Setting<>( - sizeKey, - s -> Integer.toString(size), - s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), - Setting.Property.NodeScope - ); - final String queueSizeKey = settingsKey(prefix, "queue_size"); - this.queueSizeSetting = Setting.intSetting( - queueSizeKey, - queueSize, - new Setting.Property[] { Setting.Property.NodeScope, Setting.Property.Dynamic } - ); - this.runnableTaskListener = runnableTaskListener; - } - - @Override - public List> getRegisteredSettings() { - return Arrays.asList(sizeSetting, queueSizeSetting); - } - - @Override - ResizableExecutorSettings getSettings(Settings settings) { - final String nodeName = Node.NODE_NAME_SETTING.get(settings); - final int size = sizeSetting.get(settings); - final int queueSize = queueSizeSetting.get(settings); - return new ResizableExecutorSettings(nodeName, size, queueSize); - } - - @Override - ProtobufThreadPool.ExecutorHolder build(final ResizableExecutorSettings settings, final ThreadContext threadContext) { - int size = settings.size; - int queueSize = settings.queueSize; - final ThreadFactory threadFactory = OpenSearchExecutors.daemonThreadFactory( - OpenSearchExecutors.threadName(settings.nodeName, name()) - ); - final ExecutorService executor = OpenSearchExecutors.newResizable( - settings.nodeName + "/" + name(), - size, - queueSize, - threadFactory, - threadContext, - runnableTaskListener - ); - final ProtobufThreadPool.Info info = new ProtobufThreadPool.Info( - name(), - ProtobufThreadPool.ThreadPoolType.RESIZABLE, - size, - size, - null, - queueSize < 0 ? null : new ProtobufSizeValue(queueSize) - ); - return new ProtobufThreadPool.ExecutorHolder(executor, info); - } - - @Override - String formatInfo(ProtobufThreadPool.Info info) { - return String.format( - Locale.ROOT, - "name [%s], size [%d], queue size [%s]", - info.getName(), - info.getMax(), - info.getQueueSize() == null ? 
"unbounded" : info.getQueueSize() - ); - } - - static class ResizableExecutorSettings extends ProtobufExecutorBuilder.ExecutorSettings { - - private final int size; - private final int queueSize; - - ResizableExecutorSettings(final String nodeName, final int size, final int queueSize) { - super(nodeName); - this.size = size; - this.queueSize = queueSize; - } - - } -} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java deleted file mode 100644 index 583dfb4e785ba..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufScalingExecutorBuilder.java +++ /dev/null @@ -1,141 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. -*/ - -package org.opensearch.threadpool; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.node.Node; - -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; - -/** - * A builder for scaling executors. -* -* @opensearch.internal -*/ -public final class ProtobufScalingExecutorBuilder extends ProtobufExecutorBuilder { - - private final Setting coreSetting; - private final Setting maxSetting; - private final Setting keepAliveSetting; - - /** - * Construct a scaling executor builder; the settings will have the - * key prefix "thread_pool." followed by the executor name. - * - * @param name the name of the executor - * @param core the minimum number of threads in the pool - * @param max the maximum number of threads in the pool - * @param keepAlive the time that spare threads above {@code core} - * threads will be kept alive - */ - public ProtobufScalingExecutorBuilder(final String name, final int core, final int max, final TimeValue keepAlive) { - this(name, core, max, keepAlive, "thread_pool." + name); - } - - /** - * Construct a scaling executor builder; the settings will have the - * specified key prefix. 
- * - * @param name the name of the executor - * @param core the minimum number of threads in the pool - * @param max the maximum number of threads in the pool - * @param keepAlive the time that spare threads above {@code core} - * threads will be kept alive - * @param prefix the prefix for the settings keys - */ - public ProtobufScalingExecutorBuilder( - final String name, - final int core, - final int max, - final TimeValue keepAlive, - final String prefix - ) { - super(name); - this.coreSetting = Setting.intSetting(settingsKey(prefix, "core"), core, Setting.Property.NodeScope); - this.maxSetting = Setting.intSetting(settingsKey(prefix, "max"), max, Setting.Property.NodeScope); - this.keepAliveSetting = Setting.timeSetting(settingsKey(prefix, "keep_alive"), keepAlive, Setting.Property.NodeScope); - } - - @Override - public List> getRegisteredSettings() { - return Arrays.asList(coreSetting, maxSetting, keepAliveSetting); - } - - @Override - ScalingExecutorSettings getSettings(Settings settings) { - final String nodeName = Node.NODE_NAME_SETTING.get(settings); - final int coreThreads = coreSetting.get(settings); - final int maxThreads = maxSetting.get(settings); - final TimeValue keepAlive = keepAliveSetting.get(settings); - return new ScalingExecutorSettings(nodeName, coreThreads, maxThreads, keepAlive); - } - - ProtobufThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final ThreadContext threadContext) { - TimeValue keepAlive = settings.keepAlive; - int core = settings.core; - int max = settings.max; - final ProtobufThreadPool.Info info = new ProtobufThreadPool.Info( - name(), - ProtobufThreadPool.ThreadPoolType.SCALING, - core, - max, - keepAlive, - null - ); - final ThreadFactory threadFactory = OpenSearchExecutors.daemonThreadFactory( - OpenSearchExecutors.threadName(settings.nodeName, name()) - ); - final ExecutorService executor = OpenSearchExecutors.newScaling( - settings.nodeName + "/" + name(), - core, - max, - keepAlive.millis(), - TimeUnit.MILLISECONDS, - threadFactory, - threadContext - ); - return new ProtobufThreadPool.ExecutorHolder(executor, info); - } - - @Override - String formatInfo(ProtobufThreadPool.Info info) { - return String.format( - Locale.ROOT, - "name [%s], core [%d], max [%d], keep alive [%s]", - info.getName(), - info.getMin(), - info.getMax(), - info.getKeepAlive() - ); - } - - static class ScalingExecutorSettings extends ProtobufExecutorBuilder.ExecutorSettings { - - private final int core; - private final int max; - private final TimeValue keepAlive; - - ScalingExecutorSettings(final String nodeName, final int core, final int max, final TimeValue keepAlive) { - super(nodeName); - this.core = core; - this.max = max; - this.keepAlive = keepAlive; - } - } - -} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java deleted file mode 100644 index c77984a5a5953..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPool.java +++ /dev/null @@ -1,860 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. 
-*/ - -package org.opensearch.threadpool; - -import com.google.protobuf.CodedInputStream; -import com.google.protobuf.CodedOutputStream; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.Version; -import org.opensearch.common.Nullable; -import org.opensearch.common.io.stream.ProtobufStreamInput; -import org.opensearch.common.io.stream.ProtobufStreamOutput; -import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ProtobufSizeValue; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.concurrent.XRejectedExecutionHandler; -import org.opensearch.node.Node; -import org.opensearch.node.ProtobufReportingService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.RejectedExecutionHandler; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -import static java.util.Collections.unmodifiableMap; - -/** - * The OpenSearch threadpool class -* -* @opensearch.internal -*/ -public class ProtobufThreadPool implements ProtobufReportingService, Scheduler { - - private static final Logger logger = LogManager.getLogger(ThreadPool.class); - - /** - * The threadpool names. - * - * @opensearch.internal - */ - public static class Names { - public static final String SAME = "same"; - public static final String GENERIC = "generic"; - @Deprecated - public static final String LISTENER = "listener"; - public static final String GET = "get"; - public static final String ANALYZE = "analyze"; - public static final String WRITE = "write"; - public static final String SEARCH = "search"; - public static final String SEARCH_THROTTLED = "search_throttled"; - public static final String MANAGEMENT = "management"; - public static final String FLUSH = "flush"; - public static final String REFRESH = "refresh"; - public static final String WARMER = "warmer"; - public static final String SNAPSHOT = "snapshot"; - public static final String FORCE_MERGE = "force_merge"; - public static final String FETCH_SHARD_STARTED = "fetch_shard_started"; - public static final String FETCH_SHARD_STORE = "fetch_shard_store"; - public static final String SYSTEM_READ = "system_read"; - public static final String SYSTEM_WRITE = "system_write"; - public static final String TRANSLOG_TRANSFER = "translog_transfer"; - public static final String TRANSLOG_SYNC = "translog_sync"; - public static final String REMOTE_PURGE = "remote_purge"; - } - - /** - * The threadpool type. 
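A short sketch of the wire-name round trip this type supports (the "bogus" value is an illustrative assumption, not from this patch):

    // String <-> constant mapping used when (de)serializing thread pool types.
    ThreadPoolType scaling = ThreadPoolType.fromType("scaling"); // -> ThreadPoolType.SCALING
    String wireName = scaling.getType();                         // -> "scaling"
    ThreadPoolType.fromType("bogus"); // throws IllegalArgumentException: no ThreadPoolType for bogus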
- * - * @opensearch.internal - */ - public enum ThreadPoolType { - DIRECT("direct"), - FIXED("fixed"), - RESIZABLE("resizable"), - SCALING("scaling"); - - private final String type; - - public String getType() { - return type; - } - - ThreadPoolType(String type) { - this.type = type; - } - - private static final Map TYPE_MAP; - - static { - Map typeMap = new HashMap<>(); - for (ThreadPoolType threadPoolType : ThreadPoolType.values()) { - typeMap.put(threadPoolType.getType(), threadPoolType); - } - TYPE_MAP = Collections.unmodifiableMap(typeMap); - } - - public static ThreadPoolType fromType(String type) { - ThreadPoolType threadPoolType = TYPE_MAP.get(type); - if (threadPoolType == null) { - throw new IllegalArgumentException("no ThreadPoolType for " + type); - } - return threadPoolType; - } - } - - public static final Map THREAD_POOL_TYPES; - - static { - HashMap map = new HashMap<>(); - map.put(Names.SAME, ThreadPoolType.DIRECT); - map.put(Names.GENERIC, ThreadPoolType.SCALING); - map.put(Names.LISTENER, ThreadPoolType.FIXED); - map.put(Names.GET, ThreadPoolType.FIXED); - map.put(Names.ANALYZE, ThreadPoolType.FIXED); - map.put(Names.WRITE, ThreadPoolType.FIXED); - map.put(Names.SEARCH, ThreadPoolType.RESIZABLE); - map.put(Names.MANAGEMENT, ThreadPoolType.SCALING); - map.put(Names.FLUSH, ThreadPoolType.SCALING); - map.put(Names.REFRESH, ThreadPoolType.SCALING); - map.put(Names.WARMER, ThreadPoolType.SCALING); - map.put(Names.SNAPSHOT, ThreadPoolType.SCALING); - map.put(Names.FORCE_MERGE, ThreadPoolType.FIXED); - map.put(Names.FETCH_SHARD_STARTED, ThreadPoolType.SCALING); - map.put(Names.FETCH_SHARD_STORE, ThreadPoolType.SCALING); - map.put(Names.SEARCH_THROTTLED, ThreadPoolType.RESIZABLE); - map.put(Names.SYSTEM_READ, ThreadPoolType.FIXED); - map.put(Names.SYSTEM_WRITE, ThreadPoolType.FIXED); - map.put(Names.TRANSLOG_TRANSFER, ThreadPoolType.SCALING); - map.put(Names.TRANSLOG_SYNC, ThreadPoolType.FIXED); - map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); - THREAD_POOL_TYPES = Collections.unmodifiableMap(map); - } - - private final Map executors; - - private final ProtobufThreadPoolInfo threadPoolInfo; - - private final CachedTimeThread cachedTimeThread; - - static final ExecutorService DIRECT_EXECUTOR = OpenSearchExecutors.newDirectExecutorService(); - - private final ThreadContext threadContext; - - private final Map builders; - - private final ScheduledThreadPoolExecutor scheduler; - - public Collection builders() { - return Collections.unmodifiableCollection(builders.values()); - } - - public static Setting ESTIMATED_TIME_INTERVAL_SETTING = Setting.timeSetting( - "thread_pool.estimated_time_interval", - TimeValue.timeValueMillis(200), - TimeValue.ZERO, - Setting.Property.NodeScope - ); - - public ProtobufThreadPool(final Settings settings, final ProtobufExecutorBuilder... customBuilders) { - this(settings, null, customBuilders); - } - - public ProtobufThreadPool( - final Settings settings, - final AtomicReference runnableTaskListener, - final ProtobufExecutorBuilder... 
customBuilders - ) { - assert Node.NODE_NAME_SETTING.exists(settings); - - final Map builders = new HashMap<>(); - final int allocatedProcessors = OpenSearchExecutors.allocatedProcessors(settings); - final int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors); - final int halfProcMaxAt10 = halfAllocatedProcessorsMaxTen(allocatedProcessors); - final int genericThreadPoolMax = boundedBy(4 * allocatedProcessors, 128, 512); - builders.put( - Names.GENERIC, - new ProtobufScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30)) - ); - builders.put(Names.WRITE, new ProtobufFixedExecutorBuilder(settings, Names.WRITE, allocatedProcessors, 10000)); - builders.put(Names.GET, new ProtobufFixedExecutorBuilder(settings, Names.GET, allocatedProcessors, 1000)); - builders.put(Names.ANALYZE, new ProtobufFixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); - builders.put( - Names.SEARCH, - new ProtobufResizableExecutorBuilder( - settings, - Names.SEARCH, - searchThreadPoolSize(allocatedProcessors), - 1000, - runnableTaskListener - ) - ); - builders.put( - Names.SEARCH_THROTTLED, - new ProtobufResizableExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, runnableTaskListener) - ); - builders.put(Names.MANAGEMENT, new ProtobufScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); - // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded - // the assumption here is that the listeners should be very lightweight on the listeners side - builders.put(Names.LISTENER, new ProtobufFixedExecutorBuilder(settings, Names.LISTENER, halfProcMaxAt10, -1, true)); - builders.put(Names.FLUSH, new ProtobufScalingExecutorBuilder(Names.FLUSH, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); - builders.put(Names.REFRESH, new ProtobufScalingExecutorBuilder(Names.REFRESH, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5))); - builders.put(Names.WARMER, new ProtobufScalingExecutorBuilder(Names.WARMER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); - builders.put(Names.SNAPSHOT, new ProtobufScalingExecutorBuilder(Names.SNAPSHOT, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); - builders.put( - Names.FETCH_SHARD_STARTED, - new ProtobufScalingExecutorBuilder(Names.FETCH_SHARD_STARTED, 1, 2 * allocatedProcessors, TimeValue.timeValueMinutes(5)) - ); - builders.put(Names.FORCE_MERGE, new ProtobufFixedExecutorBuilder(settings, Names.FORCE_MERGE, 1, -1)); - builders.put( - Names.FETCH_SHARD_STORE, - new ProtobufScalingExecutorBuilder(Names.FETCH_SHARD_STORE, 1, 2 * allocatedProcessors, TimeValue.timeValueMinutes(5)) - ); - builders.put(Names.SYSTEM_READ, new ProtobufFixedExecutorBuilder(settings, Names.SYSTEM_READ, halfProcMaxAt5, 2000, false)); - builders.put(Names.SYSTEM_WRITE, new ProtobufFixedExecutorBuilder(settings, Names.SYSTEM_WRITE, halfProcMaxAt5, 1000, false)); - builders.put( - Names.TRANSLOG_TRANSFER, - new ProtobufScalingExecutorBuilder(Names.TRANSLOG_TRANSFER, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) - ); - builders.put(Names.TRANSLOG_SYNC, new ProtobufFixedExecutorBuilder(settings, Names.TRANSLOG_SYNC, allocatedProcessors * 4, 10000)); - builders.put( - Names.REMOTE_PURGE, - new ProtobufScalingExecutorBuilder(Names.REMOTE_PURGE, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)) - ); - - for (final ProtobufExecutorBuilder builder : customBuilders) { - if (builders.containsKey(builder.name())) { - throw new IllegalArgumentException("builder with name [" + 
builder.name() + "] already exists"); - } - builders.put(builder.name(), builder); - } - this.builders = Collections.unmodifiableMap(builders); - - threadContext = new ThreadContext(settings); - - final Map executors = new HashMap<>(); - for (final Map.Entry entry : builders.entrySet()) { - final ProtobufExecutorBuilder.ExecutorSettings executorSettings = entry.getValue().getSettings(settings); - final ExecutorHolder executorHolder = entry.getValue().build(executorSettings, threadContext); - if (executors.containsKey(executorHolder.info.getName())) { - throw new IllegalStateException("duplicate executors with name [" + executorHolder.info.getName() + "] registered"); - } - logger.debug("created thread pool: {}", entry.getValue().formatInfo(executorHolder.info)); - executors.put(entry.getKey(), executorHolder); - } - - executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT))); - this.executors = unmodifiableMap(executors); - - final List infos = executors.values() - .stream() - .filter(holder -> holder.info.getName().equals("same") == false) - .map(holder -> holder.info) - .collect(Collectors.toList()); - this.threadPoolInfo = new ProtobufThreadPoolInfo(infos); - this.scheduler = Scheduler.initScheduler(settings); - TimeValue estimatedTimeInterval = ESTIMATED_TIME_INTERVAL_SETTING.get(settings); - this.cachedTimeThread = new CachedTimeThread(OpenSearchExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis()); - this.cachedTimeThread.start(); - } - - /** - * Returns a value of milliseconds that may be used for relative time calculations. - * - * This method should only be used for calculating time deltas. For an epoch based - * timestamp, see {@link #absoluteTimeInMillis()}. - */ - public long relativeTimeInMillis() { - return TimeValue.nsecToMSec(relativeTimeInNanos()); - } - - /** - * Returns a value of nanoseconds that may be used for relative time calculations. - * - * This method should only be used for calculating time deltas. For an epoch based - * timestamp, see {@link #absoluteTimeInMillis()}. - */ - public long relativeTimeInNanos() { - return cachedTimeThread.relativeTimeInNanos(); - } - - /** - * Returns the value of milliseconds since UNIX epoch. - * - * This method should only be used for exact date/time formatting. For calculating - * time deltas that should not suffer from negative deltas, which are possible with - * this method, see {@link #relativeTimeInMillis()}. 
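A minimal usage sketch, assuming a threadPool reference and a hypothetical doWork() workload; time deltas come from the monotonic relative clock, never from the wall clock:

    // Illustrative only: measure an elapsed interval with the cached monotonic clock.
    long startNanos = threadPool.relativeTimeInNanos();
    doWork(); // hypothetical workload
    long elapsedMillis = TimeValue.nsecToMSec(threadPool.relativeTimeInNanos() - startNanos);
    // absoluteTimeInMillis() is reserved for epoch timestamps, e.g. date formatting.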
- */ - public long absoluteTimeInMillis() { - return cachedTimeThread.absoluteTimeInMillis(); - } - - @Override - public ProtobufThreadPoolInfo info() { - return threadPoolInfo; - } - - public Info info(String name) { - ExecutorHolder holder = executors.get(name); - if (holder == null) { - return null; - } - return holder.info; - } - - public ThreadPoolStats stats() { - List stats = new ArrayList<>(); - for (ExecutorHolder holder : executors.values()) { - final String name = holder.info.getName(); - // no need to have info on "same" thread pool - if ("same".equals(name)) { - continue; - } - int threads = -1; - int queue = -1; - int active = -1; - long rejected = -1; - int largest = -1; - long completed = -1; - if (holder.executor() instanceof ThreadPoolExecutor) { - ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) holder.executor(); - threads = threadPoolExecutor.getPoolSize(); - queue = threadPoolExecutor.getQueue().size(); - active = threadPoolExecutor.getActiveCount(); - largest = threadPoolExecutor.getLargestPoolSize(); - completed = threadPoolExecutor.getCompletedTaskCount(); - RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); - if (rejectedExecutionHandler instanceof XRejectedExecutionHandler) { - rejected = ((XRejectedExecutionHandler) rejectedExecutionHandler).rejected(); - } - } - stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed)); - } - return new ThreadPoolStats(stats); - } - - /** - * Get the generic {@link ExecutorService}. This executor service - * {@link Executor#execute(Runnable)} method will run the {@link Runnable} it is given in the - * {@link ThreadContext} of the thread that queues it. - *
- * Warning: this {@linkplain ExecutorService} will not throw {@link RejectedExecutionException} - * if you submit a task while it shutdown. It will instead silently queue it and not run it. - */ - public ExecutorService generic() { - return executor(Names.GENERIC); - } - - /** - * Get the {@link ExecutorService} with the given name. This executor service's - * {@link Executor#execute(Runnable)} method will run the {@link Runnable} it is given in the - * {@link ThreadContext} of the thread that queues it. - *
- * Warning: this {@linkplain ExecutorService} might not throw {@link RejectedExecutionException} - * if you submit a task while it shutdown. It will instead silently queue it and not run it. - * - * @param name the name of the executor service to obtain - * @throws IllegalArgumentException if no executor service with the specified name exists - */ - public ExecutorService executor(String name) { - final ExecutorHolder holder = executors.get(name); - if (holder == null) { - throw new IllegalArgumentException("no executor service found for [" + name + "]"); - } - return holder.executor(); - } - - /** - * Schedules a one-shot command to run after a given delay. The command is run in the context of the calling thread. - * - * @param command the command to run - * @param delay delay before the task executes - * @param executor the name of the thread pool on which to execute this task. SAME means "execute on the scheduler thread" which changes - * the meaning of the ScheduledFuture returned by this method. In that case the ScheduledFuture will complete only when the - * command completes. - * @return a ScheduledFuture who's get will return when the task is has been added to its target thread pool and throw an exception if - * the task is canceled before it was added to its target thread pool. Once the task has been added to its target thread pool - * the ScheduledFuture will cannot interact with it. - * @throws OpenSearchRejectedExecutionException if the task cannot be scheduled for execution - */ - @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor) { - command = threadContext.preserveContext(command); - if (!Names.SAME.equals(executor)) { - command = new ThreadedRunnable(command, executor(executor)); - } - return new ScheduledCancellableAdapter(scheduler.schedule(command, delay.millis(), TimeUnit.MILLISECONDS)); - } - - public void scheduleUnlessShuttingDown(TimeValue delay, String executor, Runnable command) { - try { - schedule(command, delay, executor); - } catch (OpenSearchRejectedExecutionException e) { - if (e.isExecutorShutdown()) { - logger.debug( - new ParameterizedMessage( - "could not schedule execution of [{}] after [{}] on [{}] as executor is shut down", - command, - delay, - executor - ), - e - ); - } else { - throw e; - } - } - } - - @Override - public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) { - return new ReschedulingRunnable(command, interval, executor, this, (e) -> { - if (logger.isDebugEnabled()) { - logger.debug(() -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", command, executor), e); - } - }, - (e) -> logger.warn( - () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", command, executor), - e - ) - ); - } - - protected final void stopCachedTimeThread() { - cachedTimeThread.running = false; - cachedTimeThread.interrupt(); - } - - public void shutdown() { - stopCachedTimeThread(); - scheduler.shutdown(); - for (ExecutorHolder executor : executors.values()) { - if (executor.executor() instanceof ThreadPoolExecutor) { - executor.executor().shutdown(); - } - } - } - - public void shutdownNow() { - stopCachedTimeThread(); - scheduler.shutdownNow(); - for (ExecutorHolder executor : executors.values()) { - if (executor.executor() instanceof ThreadPoolExecutor) { - executor.executor().shutdownNow(); - } - } - } - - public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { - boolean 
result = scheduler.awaitTermination(timeout, unit); - for (ExecutorHolder executor : executors.values()) { - if (executor.executor() instanceof ThreadPoolExecutor) { - result &= executor.executor().awaitTermination(timeout, unit); - } - } - cachedTimeThread.join(unit.toMillis(timeout)); - return result; - } - - public ScheduledExecutorService scheduler() { - return this.scheduler; - } - - /** - * Constrains a value between minimum and maximum values - * (inclusive). - * - * @param value the value to constrain - * @param min the minimum acceptable value - * @param max the maximum acceptable value - * @return min if value is less than min, max if value is greater - * than value, otherwise value - */ - static int boundedBy(int value, int min, int max) { - return Math.min(max, Math.max(min, value)); - } - - static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { - return boundedBy((allocatedProcessors + 1) / 2, 1, 5); - } - - static int halfAllocatedProcessorsMaxTen(final int allocatedProcessors) { - return boundedBy((allocatedProcessors + 1) / 2, 1, 10); - } - - static int twiceAllocatedProcessors(final int allocatedProcessors) { - return boundedBy(2 * allocatedProcessors, 2, Integer.MAX_VALUE); - } - - public static int searchThreadPoolSize(final int allocatedProcessors) { - return ((allocatedProcessors * 3) / 2) + 1; - } - - class LoggingRunnable implements Runnable { - - private final Runnable runnable; - - LoggingRunnable(Runnable runnable) { - this.runnable = runnable; - } - - @Override - public void run() { - try { - runnable.run(); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); - throw e; - } - } - - @Override - public int hashCode() { - return runnable.hashCode(); - } - - @Override - public boolean equals(Object obj) { - return runnable.equals(obj); - } - - @Override - public String toString() { - return "[threaded] " + runnable.toString(); - } - } - - class ThreadedRunnable implements Runnable { - - private final Runnable runnable; - - private final Executor executor; - - ThreadedRunnable(Runnable runnable, Executor executor) { - this.runnable = runnable; - this.executor = executor; - } - - @Override - public void run() { - try { - executor.execute(runnable); - } catch (OpenSearchRejectedExecutionException e) { - if (e.isExecutorShutdown()) { - logger.debug( - new ParameterizedMessage( - "could not schedule execution of [{}] on [{}] as executor is shut down", - runnable, - executor - ), - e - ); - } else { - throw e; - } - } - } - - @Override - public int hashCode() { - return runnable.hashCode(); - } - - @Override - public boolean equals(Object obj) { - return runnable.equals(obj); - } - - @Override - public String toString() { - return "[threaded] " + runnable.toString(); - } - } - - /** - * A thread to cache millisecond time values from - * {@link System#nanoTime()} and {@link System#currentTimeMillis()}. - * - * The values are updated at a specified interval. - */ - static class CachedTimeThread extends Thread { - - final long interval; - volatile boolean running = true; - volatile long relativeNanos; - volatile long absoluteMillis; - - CachedTimeThread(String name, long interval) { - super(name); - this.interval = interval; - this.relativeNanos = System.nanoTime(); - this.absoluteMillis = System.currentTimeMillis(); - setDaemon(true); - } - - /** - * Return the current time used for relative calculations. This is {@link System#nanoTime()}. - *
- * If {@link ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING} is set to 0 - * then the cache is disabled and the method calls {@link System#nanoTime()} - * whenever called. Typically used for testing. - */ - long relativeTimeInNanos() { - if (0 < interval) { - return relativeNanos; - } - return System.nanoTime(); - } - - /** - * Return the current epoch time, used to find absolute time. This is - * a cached version of {@link System#currentTimeMillis()}. - *
- * If {@link ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING} is set to 0 - * then the cache is disabled and the method calls {@link System#currentTimeMillis()} - * whenever called. Typically used for testing. - */ - long absoluteTimeInMillis() { - if (0 < interval) { - return absoluteMillis; - } - return System.currentTimeMillis(); - } - - @Override - public void run() { - while (running && 0 < interval) { - relativeNanos = System.nanoTime(); - absoluteMillis = System.currentTimeMillis(); - try { - Thread.sleep(interval); - } catch (InterruptedException e) { - running = false; - return; - } - } - } - } - - static class ExecutorHolder { - private final ExecutorService executor; - public final Info info; - - ExecutorHolder(ExecutorService executor, Info info) { - assert executor instanceof OpenSearchThreadPoolExecutor || executor == DIRECT_EXECUTOR; - this.executor = executor; - this.info = info; - } - - ExecutorService executor() { - return executor; - } - } - - /** - * The thread pool information. - * - * @opensearch.internal - */ - public static class Info implements ProtobufWriteable { - - private final String name; - private final ThreadPoolType type; - private final int min; - private final int max; - private final TimeValue keepAlive; - private final ProtobufSizeValue queueSize; - - public Info(String name, ThreadPoolType type) { - this(name, type, -1); - } - - public Info(String name, ThreadPoolType type, int size) { - this(name, type, size, size, null, null); - } - - public Info( - String name, - ThreadPoolType type, - int min, - int max, - @Nullable TimeValue keepAlive, - @Nullable ProtobufSizeValue queueSize - ) { - this.name = name; - this.type = type; - this.min = min; - this.max = max; - this.keepAlive = keepAlive; - this.queueSize = queueSize; - } - - public Info(CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); - name = in.readString(); - final String typeStr = in.readString(); - // Opensearch on or after 3.0.0 version doesn't know about "fixed_auto_queue_size" thread pool. Convert it to RESIZABLE. - if (typeStr.equalsIgnoreCase("fixed_auto_queue_size")) { - type = ThreadPoolType.RESIZABLE; - } else { - type = ThreadPoolType.fromType(typeStr); - } - min = in.readInt32(); - max = in.readInt32(); - keepAlive = protobufStreamInput.readOptionalTimeValue(); - queueSize = protobufStreamInput.readOptionalWriteable(ProtobufSizeValue::new); - } - - @Override - public void writeTo(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); - out.writeStringNoTag(name); - if (type == ThreadPoolType.RESIZABLE && protobufStreamOutput.getVersion().before(Version.V_3_0_0)) { - // Opensearch on older version doesn't know about "resizable" thread pool. 
Convert RESIZABLE to FIXED - // to avoid serialization/de-serization issue between nodes with different OpenSearch version - out.writeStringNoTag(ThreadPoolType.FIXED.getType()); - } else { - out.writeStringNoTag(type.getType()); - } - out.writeInt32NoTag(min); - out.writeInt32NoTag(max); - protobufStreamOutput.writeOptionalTimeValue(keepAlive); - protobufStreamOutput.writeOptionalWriteable(queueSize); - } - - public String getName() { - return this.name; - } - - public ThreadPoolType getThreadPoolType() { - return this.type; - } - - public int getMin() { - return this.min; - } - - public int getMax() { - return this.max; - } - - @Nullable - public TimeValue getKeepAlive() { - return this.keepAlive; - } - - @Nullable - public ProtobufSizeValue getQueueSize() { - return this.queueSize; - } - } - - /** - * Returns true if the given service was terminated successfully. If the termination timed out, - * the service is null this method will return false. - */ - public static boolean terminate(ExecutorService service, long timeout, TimeUnit timeUnit) { - if (service != null) { - service.shutdown(); - if (awaitTermination(service, timeout, timeUnit)) return true; - service.shutdownNow(); - return awaitTermination(service, timeout, timeUnit); - } - return false; - } - - private static boolean awaitTermination(final ExecutorService service, final long timeout, final TimeUnit timeUnit) { - try { - if (service.awaitTermination(timeout, timeUnit)) { - return true; - } - } catch (final InterruptedException e) { - Thread.currentThread().interrupt(); - } - return false; - } - - /** - * Returns true if the given pool was terminated successfully. If the termination timed out, - * the service is null this method will return false. - */ - public static boolean terminate(ThreadPool pool, long timeout, TimeUnit timeUnit) { - if (pool != null) { - // Leverage try-with-resources to close the threadpool - pool.shutdown(); - if (awaitTermination(pool, timeout, timeUnit)) { - return true; - } - // last resort - pool.shutdownNow(); - return awaitTermination(pool, timeout, timeUnit); - } - return false; - } - - private static boolean awaitTermination(final ThreadPool threadPool, final long timeout, final TimeUnit timeUnit) { - try { - if (threadPool.awaitTermination(timeout, timeUnit)) { - return true; - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - return false; - } - - public ThreadContext getThreadContext() { - return threadContext; - } - - public static boolean assertNotScheduleThread(String reason) { - assert Thread.currentThread().getName().contains("scheduler") == false : "Expected current thread [" - + Thread.currentThread() - + "] to not be the scheduler thread. 
Reason: [" - + reason - + "]"; - return true; - } - - public static boolean assertCurrentMethodIsNotCalledRecursively() { - final StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); - assert stackTraceElements.length >= 3 : stackTraceElements.length; - assert stackTraceElements[0].getMethodName().equals("getStackTrace") : stackTraceElements[0]; - assert stackTraceElements[1].getMethodName().equals("assertCurrentMethodIsNotCalledRecursively") : stackTraceElements[1]; - final StackTraceElement testingMethod = stackTraceElements[2]; - for (int i = 3; i < stackTraceElements.length; i++) { - assert stackTraceElements[i].getClassName().equals(testingMethod.getClassName()) == false - || stackTraceElements[i].getMethodName().equals(testingMethod.getMethodName()) == false : testingMethod.getClassName() - + "#" - + testingMethod.getMethodName() - + " is called recursively"; - } - return true; - } -} diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java index f4a67d306f4bc..c9e3a51bb077c 100644 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java @@ -25,17 +25,17 @@ * * @opensearch.internal */ -public class ProtobufThreadPoolInfo implements ProtobufReportingService.ProtobufInfo, Iterable { +public class ProtobufThreadPoolInfo implements ProtobufReportingService.ProtobufInfo, Iterable { - private final List infos; + private final List infos; - public ProtobufThreadPoolInfo(List infos) { + public ProtobufThreadPoolInfo(List infos) { this.infos = Collections.unmodifiableList(infos); } public ProtobufThreadPoolInfo(CodedInputStream in) throws IOException { ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); - this.infos = Collections.unmodifiableList(protobufStreamInput.readList(ProtobufThreadPool.Info::new)); + this.infos = Collections.unmodifiableList(protobufStreamInput.readList(ThreadPool.Info::new)); } @Override @@ -45,7 +45,7 @@ public void writeTo(CodedOutputStream out) throws IOException { } @Override - public Iterator iterator() { + public Iterator iterator() { return infos.iterator(); } } diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 0851677bcb13a..fa94ec0156090 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -32,16 +32,22 @@ package org.opensearch.threadpool; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ProtobufSizeValue; import org.opensearch.common.unit.SizeValue; import 
org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; @@ -53,10 +59,12 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.Node; +import org.opensearch.node.ProtobufReportingService; import org.opensearch.node.ReportingService; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -80,7 +88,7 @@ * * @opensearch.internal */ -public class ThreadPool implements ReportingService, Scheduler { +public class ThreadPool implements ReportingService, Scheduler, ProtobufReportingService { private static final Logger logger = LogManager.getLogger(ThreadPool.class); @@ -359,6 +367,13 @@ public ThreadPoolInfo info() { return threadPoolInfo; } + @Override + public ProtobufThreadPoolInfo protobufInfo() { + List threadPoolInfos = new ArrayList<>(); + threadPoolInfo.iterator().forEachRemaining(threadPoolInfos::add); + return new ProtobufThreadPoolInfo(threadPoolInfos); + } + public Info info(String name) { ExecutorHolder holder = executors.get(name); if (holder == null) { @@ -398,6 +413,37 @@ public ThreadPoolStats stats() { return new ThreadPoolStats(stats); } + public ProtobufThreadPoolStats protobufStats() { + List stats = new ArrayList<>(); + for (ExecutorHolder holder : executors.values()) { + final String name = holder.info.getName(); + // no need to have info on "same" thread pool + if ("same".equals(name)) { + continue; + } + int threads = -1; + int queue = -1; + int active = -1; + long rejected = -1; + int largest = -1; + long completed = -1; + if (holder.executor() instanceof ThreadPoolExecutor) { + ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) holder.executor(); + threads = threadPoolExecutor.getPoolSize(); + queue = threadPoolExecutor.getQueue().size(); + active = threadPoolExecutor.getActiveCount(); + largest = threadPoolExecutor.getLargestPoolSize(); + completed = threadPoolExecutor.getCompletedTaskCount(); + RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); + if (rejectedExecutionHandler instanceof XRejectedExecutionHandler) { + rejected = ((XRejectedExecutionHandler) rejectedExecutionHandler).rejected(); + } + } + stats.add(new ProtobufThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed)); + } + return new ProtobufThreadPoolStats(stats); + } + /** * Get the generic {@link ExecutorService}. This executor service * {@link Executor#execute(Runnable)} method will run the {@link Runnable} it is given in the @@ -721,7 +767,7 @@ ExecutorService executor() { * * @opensearch.internal */ - public static class Info implements Writeable, ToXContentFragment { + public static class Info implements Writeable, ToXContentFragment, ProtobufWriteable { private final String name; private final ThreadPoolType type; @@ -778,6 +824,40 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(queueSize); } + public Info(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + name = in.readString(); + final String typeStr = in.readString(); + // Opensearch on or after 3.0.0 version doesn't know about "fixed_auto_queue_size" thread pool. Convert it to RESIZABLE. 
+ if (typeStr.equalsIgnoreCase("fixed_auto_queue_size")) { + type = ThreadPoolType.RESIZABLE; + } else { + type = ThreadPoolType.fromType(typeStr); + } + min = in.readInt32(); + max = in.readInt32(); + keepAlive = protobufStreamInput.readOptionalTimeValue(); + ProtobufSizeValue protobufQueueSize = protobufStreamInput.readOptionalWriteable(ProtobufSizeValue::new); + queueSize = new SizeValue(protobufQueueSize.getSize(), protobufQueueSize.getSizeUnit()); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + out.writeStringNoTag(name); + if (type == ThreadPoolType.RESIZABLE && protobufStreamOutput.getVersion().before(Version.V_3_0_0)) { + // Opensearch on older version doesn't know about "resizable" thread pool. Convert RESIZABLE to FIXED + // to avoid serialization/de-serization issue between nodes with different OpenSearch version + out.writeStringNoTag(ThreadPoolType.FIXED.getType()); + } else { + out.writeStringNoTag(type.getType()); + } + out.writeInt32NoTag(min); + out.writeInt32NoTag(max); + protobufStreamOutput.writeOptionalTimeValue(keepAlive); + protobufStreamOutput.writeOptionalWriteable(new ProtobufSizeValue(queueSize.getSize(), queueSize.getSizeUnit())); + } + public String getName() { return this.name; } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java index d8a3a7e87c80b..377451f06892d 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java @@ -39,16 +39,16 @@ public class ProtobufClusterConnectionManager implements ProtobufConnectionManag private static final Logger logger = LogManager.getLogger(ProtobufClusterConnectionManager.class); - private final ConcurrentMap connectedNodes = ConcurrentCollections + private final ConcurrentMap connectedNodes = ConcurrentCollections .newConcurrentMap(); private final ConcurrentMap> pendingConnections = ConcurrentCollections .newConcurrentMap(); private final AbstractRefCounted connectingRefCounter = new AbstractRefCounted("connection manager") { @Override protected void closeInternal() { - Iterator> iterator = connectedNodes.entrySet().iterator(); + Iterator> iterator = connectedNodes.entrySet().iterator(); while (iterator.hasNext()) { - Map.Entry next = iterator.next(); + Map.Entry next = iterator.next(); try { IOUtils.closeWhileHandlingException(next.getValue()); } finally { @@ -58,17 +58,17 @@ protected void closeInternal() { closeLatch.countDown(); } }; - private final ProtobufTransport transport; + private final Transport transport; private final ProtobufConnectionProfile defaultProfile; private final AtomicBoolean closing = new AtomicBoolean(false); private final CountDownLatch closeLatch = new CountDownLatch(1); private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); - public ProtobufClusterConnectionManager(Settings settings, ProtobufTransport transport) { + public ProtobufClusterConnectionManager(Settings settings, Transport transport) { this(ProtobufConnectionProfile.buildDefaultConnectionProfile(settings), transport); } - public ProtobufClusterConnectionManager(ProtobufConnectionProfile connectionProfile, ProtobufTransport transport) { + public ProtobufClusterConnectionManager(ProtobufConnectionProfile 
connectionProfile, Transport transport) { this.transport = transport; this.defaultProfile = connectionProfile; } @@ -87,7 +87,7 @@ public void removeListener(ProtobufTransportConnectionListener listener) { public void openConnection( ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile, - ActionListener listener + ActionListener listener ) { ProtobufConnectionProfile resolvedProfile = ProtobufConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile); internalOpenConnection(node, resolvedProfile, listener); @@ -149,7 +149,7 @@ public void connectToNode( try { connectionListener.onNodeConnected(node, conn); } finally { - final ProtobufTransport.Connection finalConnection = conn; + final Transport.ProtobufConnection finalConnection = conn; conn.addCloseListener(ActionListener.wrap(() -> { logger.trace("unregistering {} after connection close and marking as disconnected", node); connectedNodes.remove(node, finalConnection); @@ -183,8 +183,8 @@ public void connectToNode( * @see #connectToNode(ProtobufDiscoveryNode, ProtobufConnectionProfile, ConnectionValidator, ActionListener) */ @Override - public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node) { - ProtobufTransport.Connection connection = connectedNodes.get(node); + public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) { + Transport.ProtobufConnection connection = connectedNodes.get(node); if (connection == null) { throw new ProtobufNodeNotConnectedException(node, "Node not connected"); } @@ -204,7 +204,7 @@ public boolean nodeConnected(ProtobufDiscoveryNode node) { */ @Override public void disconnectFromNode(ProtobufDiscoveryNode node) { - ProtobufTransport.Connection nodeChannels = connectedNodes.remove(node); + Transport.ProtobufConnection nodeChannels = connectedNodes.remove(node); if (nodeChannels != null) { // if we found it and removed it we close nodeChannels.close(); @@ -252,9 +252,9 @@ private void internalClose(boolean waitForPendingConnections) { private void internalOpenConnection( ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile, - ActionListener listener + ActionListener listener ) { - transport.openConnection(node, connectionProfile, ActionListener.map(listener, connection -> { + transport.openProtobufConnection(node, connectionProfile, ActionListener.map(listener, connection -> { assert Transports.assertNotTransportThread("internalOpenConnection success"); try { connectionListener.onConnectionOpened(connection); diff --git a/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java index 8e94a43d53d37..05d059955d0fb 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java @@ -29,7 +29,7 @@ public interface ProtobufConnectionManager extends Closeable { void openConnection( ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile, - ActionListener listener + ActionListener listener ); void connectToNode( @@ -39,7 +39,7 @@ void connectToNode( ActionListener listener ) throws ConnectTransportException; - ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node); + Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node); boolean nodeConnected(ProtobufDiscoveryNode node); @@ -63,7 +63,7 @@ void connectToNode( */ @FunctionalInterface interface ConnectionValidator { - void 
validate(ProtobufTransport.Connection connection, ProtobufConnectionProfile profile, ActionListener listener); + void validate(Transport.ProtobufConnection connection, ProtobufConnectionProfile profile, ActionListener listener); } /** @@ -76,28 +76,28 @@ final class DelegatingNodeConnectionListener implements ProtobufTransportConnect private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); @Override - public void onNodeDisconnected(ProtobufDiscoveryNode key, ProtobufTransport.Connection connection) { + public void onNodeDisconnected(ProtobufDiscoveryNode key, Transport.ProtobufConnection connection) { for (ProtobufTransportConnectionListener listener : listeners) { listener.onNodeDisconnected(key, connection); } } @Override - public void onNodeConnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + public void onNodeConnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) { for (ProtobufTransportConnectionListener listener : listeners) { listener.onNodeConnected(node, connection); } } @Override - public void onConnectionOpened(ProtobufTransport.Connection connection) { + public void onConnectionOpened(Transport.ProtobufConnection connection) { for (ProtobufTransportConnectionListener listener : listeners) { listener.onConnectionOpened(connection); } } @Override - public void onConnectionClosed(ProtobufTransport.Connection connection) { + public void onConnectionClosed(Transport.ProtobufConnection connection) { for (ProtobufTransportConnectionListener listener : listeners) { listener.onConnectionClosed(connection); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java index e18057f7818df..365bed06c42f7 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java @@ -24,7 +24,7 @@ import org.opensearch.common.transport.NetworkExceptionHelper; import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.Set; @@ -42,7 +42,7 @@ final class ProtobufOutboundHandler { private final Version version; private final String[] features; private final StatsTracker statsTracker; - private final ProtobufThreadPool threadPool; + private final ThreadPool threadPool; // private final BigArrays bigArrays; private final byte[] bytes; private volatile ProtobufTransportMessageListener messageListener = ProtobufTransportMessageListener.NOOP_LISTENER; @@ -52,7 +52,7 @@ final class ProtobufOutboundHandler { Version version, String[] features, StatsTracker statsTracker, - ProtobufThreadPool threadPool, + ThreadPool threadPool, byte[] bytes ) { this.nodeName = nodeName; diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java index c400597c97d4f..a5317b0fcee62 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java @@ -17,7 +17,7 @@ import org.opensearch.client.support.ProtobufAbstractClient; import org.opensearch.cluster.node.ProtobufDiscoveryNode; import 
org.opensearch.common.settings.Settings; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; /** * ProtobufClient that is aware of remote clusters @@ -28,11 +28,11 @@ final class ProtobufRemoteClusterAwareClient extends ProtobufAbstractClient { private final ProtobufTransportService service; private final String clusterAlias; - private final ProtobufRemoteClusterService remoteClusterService; + private final RemoteClusterService remoteClusterService; ProtobufRemoteClusterAwareClient( Settings settings, - ProtobufThreadPool threadPool, + ThreadPool threadPool, ProtobufTransportService service, String clusterAlias ) { @@ -49,12 +49,12 @@ protected listener ) { remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(v -> { - ProtobufTransport.Connection connection; + Transport.ProtobufConnection connection; if (request instanceof ProtobufRemoteClusterAwareRequest) { ProtobufDiscoveryNode preferredTargetNode = ((ProtobufRemoteClusterAwareRequest) request).getPreferredTargetNode(); - connection = remoteClusterService.getConnection(preferredTargetNode, clusterAlias); + connection = remoteClusterService.getConnectionProtobuf(preferredTargetNode, clusterAlias); } else { - connection = remoteClusterService.getConnection(clusterAlias); + connection = remoteClusterService.getConnectionProtobuf(clusterAlias); } service.sendRequest( connection, @@ -73,6 +73,6 @@ public void close() { @Override public ProtobufClient getRemoteClusterClient(String clusterAlias) { - return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias); + return remoteClusterService.getRemoteClusterClientProtobuf(threadPool(), clusterAlias); } } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java index 58d90365f0ef5..ef896f62e856e 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java @@ -20,7 +20,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; @@ -47,7 +47,7 @@ final class ProtobufRemoteClusterConnection implements Closeable { private final ProtobufRemoteConnectionManager remoteConnectionManager; private final ProtobufRemoteConnectionStrategy connectionStrategy; private final String clusterAlias; - private final ProtobufThreadPool threadPool; + private final ThreadPool threadPool; private volatile boolean skipUnavailable; private final TimeValue initialConnectionTimeout; @@ -127,7 +127,7 @@ void collectNodes(ActionListener> listen request.clear(); request.nodes(true); request.local(true); // run this on the node that gets the request it's as good as any other - ProtobufTransport.Connection connection = remoteConnectionManager.getAnyRemoteConnection(); + Transport.ProtobufConnection connection = remoteConnectionManager.getAnyRemoteConnection(); transportService.sendRequest( connection, ClusterStateAction.NAME, @@ -153,7 +153,7 @@ public void handleException(ProtobufTransportException exp) { @Override public String executor() { - return ProtobufThreadPool.Names.SAME; + return ThreadPool.Names.SAME; } } ); @@ -175,11 +175,11 @@ public String 
executor() { * Returns a connection to the remote cluster, preferably a direct connection to the provided {@link ProtobufDiscoveryNode}. * If such node is not connected, the returned connection will be a proxy connection that redirects to it. */ - ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode remoteClusterNode) { + Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode remoteClusterNode) { return remoteConnectionManager.getConnection(remoteClusterNode); } - ProtobufTransport.Connection getConnection() { + Transport.ProtobufConnection getConnection() { return remoteConnectionManager.getAnyRemoteConnection(); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java index 4148beb90c6e1..eef008cbb398e 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java @@ -26,7 +26,7 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; @@ -185,7 +185,7 @@ public Set getRegisteredRemoteClusterNames() { * * @throws IllegalArgumentException if the remote cluster is unknown */ - public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node, String cluster) { + public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node, String cluster) { return getRemoteClusterConnection(cluster).getConnection(node); } @@ -204,7 +204,7 @@ public boolean isSkipUnavailable(String clusterAlias) { return getRemoteClusterConnection(clusterAlias).isSkipUnavailable(); } - public ProtobufTransport.Connection getConnection(String cluster) { + public Transport.ProtobufConnection getConnection(String cluster) { return getRemoteClusterConnection(cluster).getConnection(); } @@ -392,11 +392,11 @@ public void onFailure(Exception e) { /** * Returns a client to the remote cluster if the given cluster alias exists. 
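A sketch of the call site, assuming "cluster_one" is a configured remote cluster alias (the alias is an illustrative assumption):

    // Illustrative; the local node must hold the remote_cluster_client role,
    // otherwise an IllegalArgumentException is thrown (see below).
    ProtobufClient remote = remoteClusterService.getRemoteClusterClient(threadPool, "cluster_one");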
* - * @param threadPool the {@link ProtobufThreadPool} for the client + * @param threadPool the {@link ThreadPool} for the client * @param clusterAlias the cluster alias the remote cluster is registered under * @throws IllegalArgumentException if the given clusterAlias doesn't exist */ - public ProtobufClient getRemoteClusterClient(ProtobufThreadPool threadPool, String clusterAlias) { + public ProtobufClient getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) { if (transportService.getRemoteClusterService().isEnabled() == false) { throw new IllegalArgumentException( "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role" diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java index a8608b80768a6..95c0e2a9624bd 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java @@ -36,12 +36,12 @@ public class ProtobufRemoteConnectionManager implements ProtobufConnectionManage this.delegate = delegate; this.delegate.addListener(new ProtobufTransportConnectionListener() { @Override - public void onNodeConnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + public void onNodeConnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) { addConnectedNode(node); } @Override - public void onNodeDisconnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + public void onNodeDisconnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) { removeConnectedNode(node); } }); @@ -71,13 +71,13 @@ public void removeListener(ProtobufTransportConnectionListener listener) { public void openConnection( ProtobufDiscoveryNode node, ProtobufConnectionProfile profile, - ActionListener listener + ActionListener listener ) { delegate.openConnection(node, profile, listener); } @Override - public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node) { + public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) { try { return delegate.getConnection(node); } catch (NodeNotConnectedException e) { @@ -100,7 +100,7 @@ public ProtobufConnectionProfile getConnectionProfile() { return delegate.getConnectionProfile(); } - public ProtobufTransport.Connection getAnyRemoteConnection() { + public Transport.ProtobufConnection getAnyRemoteConnection() { List localConnectedNodes = this.connectedNodes; long curr; while ((curr = counter.incrementAndGet()) == Long.MIN_VALUE) @@ -164,11 +164,11 @@ private synchronized void removeConnectedNode(ProtobufDiscoveryNode removedNode) this.connectedNodes = Collections.unmodifiableList(newConnectedNodes); } - static final class ProxyConnection implements ProtobufTransport.Connection { - private final ProtobufTransport.Connection connection; + static final class ProxyConnection implements Transport.ProtobufConnection { + private final Transport.ProtobufConnection connection; private final ProtobufDiscoveryNode targetNode; - private ProxyConnection(ProtobufTransport.Connection connection, ProtobufDiscoveryNode targetNode) { + private ProxyConnection(Transport.ProtobufConnection connection, ProtobufDiscoveryNode targetNode) { this.connection = connection; this.targetNode = targetNode; } diff --git 
a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java index 0ee4507fc98a8..53f3ad80acc4e 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java @@ -361,7 +361,7 @@ boolean shouldRebuildConnection(Settings newSettings) { protected abstract ProtobufConnectionStrategy strategyType(); @Override - public void onNodeDisconnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) { + public void onNodeDisconnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) { if (shouldOpenMoreConnections()) { // try to reconnect and fill up the slot of the disconnected node connect( diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java index 6e4fdbfc4f053..8a2d15db70e11 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java @@ -15,7 +15,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.ProtobufCancellableTask; import org.opensearch.tasks.ProtobufTask; -import org.opensearch.tasks.ProtobufTaskManager; +import org.opensearch.tasks.TaskManager; import java.io.IOException; @@ -31,13 +31,13 @@ public final class ProtobufRequestHandlerRegistry requestReader; public ProtobufRequestHandlerRegistry( String action, ProtobufWriteable.Reader requestReader, - ProtobufTaskManager taskManager, + TaskManager taskManager, ProtobufTransportRequestHandler handler, String executor, boolean forceExecution, @@ -61,10 +61,10 @@ public Request newRequest(CodedInputStream in) throws IOException { } public void processMessageReceived(Request request, ProtobufTransportChannel channel) throws Exception { - final ProtobufTask task = taskManager.register(channel.getChannelType(), action, request); - ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task); + final ProtobufTask task = taskManager.registerProtobuf(channel.getChannelType(), action, request); + ThreadContext.StoredContext contextToRestore = taskManager.protobufTaskExecutionStarted(task); - Releasable unregisterTask = () -> taskManager.unregister(task); + Releasable unregisterTask = () -> taskManager.unregisterProtobufTask(task); try { if (channel instanceof ProtobufTcpTransportChannel && task instanceof ProtobufCancellableTask) { // if (request instanceof ShardSearchRequest) { @@ -74,7 +74,7 @@ public void processMessageReceived(Request request, ProtobufTransportChannel cha // ); // } final TcpChannel tcpChannel = ((ProtobufTcpTransportChannel) channel).getChannel(); - final Releasable stopTracking = taskManager.startTrackingCancellableChannelTask(tcpChannel, (ProtobufCancellableTask) task); + final Releasable stopTracking = taskManager.startProtobufTrackingCancellableChannelTask(tcpChannel, (ProtobufCancellableTask) task); unregisterTask = Releasables.wrap(unregisterTask, stopTracking); } final ProtobufTaskTransportChannel taskTransportChannel = new ProtobufTaskTransportChannel(channel, unregisterTask); diff --git a/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java 
b/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java index 36c851592a6fd..f503ff0951eda 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java @@ -33,7 +33,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.net.InetSocketAddress; @@ -256,7 +256,7 @@ private void collectRemoteNodes(Iterator> seedNo final ProtobufDiscoveryNode seedNode = seedNodes.next().get(); logger.trace("[{}] opening transient connection to seed node: [{}]", clusterAlias, seedNode); - final StepListener openConnectionStep = new StepListener<>(); + final StepListener openConnectionStep = new StepListener<>(); try { connectionManager.openConnection(seedNode, null, openConnectionStep); } catch (Exception e) { @@ -296,7 +296,7 @@ private void collectRemoteNodes(Iterator> seedNo fullConnectionStep.onResponse(null); } }, e -> { - final ProtobufTransport.Connection connection = openConnectionStep.result(); + final Transport.ProtobufConnection connection = openConnectionStep.result(); final ProtobufDiscoveryNode node = connection.getNode(); logger.debug(() -> new ParameterizedMessage("[{}] failed to handshake with seed node: [{}]", clusterAlias, node), e); IOUtils.closeWhileHandlingException(connection); @@ -309,7 +309,7 @@ private void collectRemoteNodes(Iterator> seedNo assert handshakeResponse.getClusterName().value() != null; remoteClusterName.set(handshakeResponse.getClusterName()); } - final ProtobufTransport.Connection connection = openConnectionStep.result(); + final Transport.ProtobufConnection connection = openConnectionStep.result(); ProtobufClusterStateRequest request = new ProtobufClusterStateRequest(); request.clear(); @@ -317,7 +317,7 @@ private void collectRemoteNodes(Iterator> seedNo // here we pass on the connection since we can only close it once the sendRequest returns otherwise // due to the async nature (it will return before it's actually sent) this can cause the request to fail // due to an already closed connection. 
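// Editorial sketch (not part of the patch): the surrounding sniff logic chains its async steps roughly as
//   final StepListener<Transport.ProtobufConnection> openConnectionStep = new StepListener<>();
//   connectionManager.openConnection(seedNode, null, openConnectionStep);
//   openConnectionStep.whenComplete(connection -> { /* handshake, then send the _state request */ }, listener::onFailure);
// which is why the connection object has to stay usable until the response handler further down has run.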
- ProtobufThreadPool threadPool = transportService.getThreadPool(); + ThreadPool threadPool = transportService.getThreadPool(); ThreadContext threadContext = threadPool.getThreadContext(); ProtobufTransportService.ContextRestoreResponseHandler responseHandler = new ProtobufTransportService.ContextRestoreResponseHandler<>( @@ -337,7 +337,7 @@ private void collectRemoteNodes(Iterator> seedNo ); } }, e -> { - final ProtobufTransport.Connection connection = openConnectionStep.result(); + final Transport.ProtobufConnection connection = openConnectionStep.result(); final ProtobufDiscoveryNode node = connection.getNode(); logger.debug( () -> new ParameterizedMessage("[{}] failed to open managed connection to seed node: [{}]", clusterAlias, node), @@ -354,12 +354,12 @@ private void collectRemoteNodes(Iterator> seedNo /* This class handles the _state response from the remote cluster when sniffing nodes to connect to */ private class SniffClusterStateResponseHandler implements ProtobufTransportResponseHandler { - private final ProtobufTransport.Connection connection; + private final Transport.ProtobufConnection connection; private final ActionListener listener; private final Iterator> seedNodes; SniffClusterStateResponseHandler( - ProtobufTransport.Connection connection, + Transport.ProtobufConnection connection, ActionListener listener, Iterator> seedNodes ) { @@ -447,7 +447,7 @@ public void handleException(ProtobufTransportException exp) { @Override public String executor() { - return ProtobufThreadPool.Names.MANAGEMENT; + return ThreadPool.Names.MANAGEMENT; } } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransport.java b/server/src/main/java/org/opensearch/transport/ProtobufTransport.java index 9a9890255fa67..234583dd011a8 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransport.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransport.java @@ -82,7 +82,7 @@ void openConnection( ActionListener listener ); - TransportStats getStats(); + ProtobufTransportStats getStats(); ResponseHandlers getResponseHandlers(); @@ -241,7 +241,7 @@ public ProtobufTransportResponseHandler onR final ProtobufTransportMessageListener listener ) { ResponseContext context = handlers.remove(requestId); - listener.onResponseReceived(requestId, context); + // listener.onResponseReceived(requestId, context); if (context == null) { return null; } else { diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java index 5c0fdea673c9d..15b51edeabd0f 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java @@ -13,7 +13,7 @@ import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.tasks.ProtobufTask; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.UncheckedIOException; @@ -104,7 +104,7 @@ public void handleException(ProtobufTransportException exp) { @Override public String executor() { - return ProtobufThreadPool.Names.SAME; + return ThreadPool.Names.SAME; } } @@ -148,7 +148,7 @@ public static void registerProxyActionWithDynamicResponseType( ProtobufRequestHandlerRegistry requestHandler = service.getRequestHandler(action); service.registerRequestHandler( 
getProxyAction(action), - ProtobufThreadPool.Names.SAME, + ThreadPool.Names.SAME, true, false, in -> new ProxyRequest<>(in, requestHandler::newRequest), @@ -168,7 +168,7 @@ public static void registerProxyAction( ProtobufRequestHandlerRegistry requestHandler = service.getRequestHandler(action); service.registerRequestHandler( getProxyAction(action), - ProtobufThreadPool.Names.SAME, + ThreadPool.Names.SAME, true, false, in -> new ProxyRequest<>(in, requestHandler::newRequest), diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java index 2f348b72e1a5a..30c66104145e8 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java @@ -23,21 +23,21 @@ public interface ProtobufTransportConnectionListener { * Called once a connection was opened * @param connection the connection */ - default void onConnectionOpened(ProtobufTransport.Connection connection) {} + default void onConnectionOpened(Transport.ProtobufConnection connection) {} /** * Called once a connection was closed. * @param connection the closed connection */ - default void onConnectionClosed(ProtobufTransport.Connection connection) {} + default void onConnectionClosed(Transport.ProtobufConnection connection) {} /** * Called once a node connection is opened and registered. */ - default void onNodeConnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) {} + default void onNodeConnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) {} /** * Called once a node connection is closed and unregistered. 
*/ - default void onNodeDisconnected(ProtobufDiscoveryNode node, ProtobufTransport.Connection connection) {} + default void onNodeDisconnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) {} } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java index 2476882515de1..bf109d0518693 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java @@ -45,11 +45,11 @@ default AsyncSender interceptSender(AsyncSender sender) { /** * A simple interface to decorate - * {@link #sendRequest(ProtobufTransport.Connection, String, ProtobufTransportRequest, TransportRequestOptions, ProtobufTransportResponseHandler)} + * {@link #sendRequest(Transport.ProtobufConnection, String, ProtobufTransportRequest, TransportRequestOptions, ProtobufTransportResponseHandler)} */ interface AsyncSender { void sendRequest( - ProtobufTransport.Connection connection, + Transport.ProtobufConnection connection, String action, ProtobufTransportRequest request, TransportRequestOptions options, diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java index 0d54c80512c03..5c58c2064801f 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java @@ -65,5 +65,5 @@ default void onRequestSent( * @param requestId the request id for this response * @param context the response context or null if the context was already processed i.e. due to a timeout. 
*/ - default void onResponseReceived(long requestId, ProtobufTransport.ResponseContext context) {} + default void onResponseReceived(long requestId, Transport.ProtobufResponseContext context) {} } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java index 9f5107009d977..4b1e934977c7e 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java @@ -15,6 +15,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ProtobufActionListenerResponseHandler; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ProtobufClusterName; @@ -40,9 +41,9 @@ import org.opensearch.node.ProtobufNodeClosedException; import org.opensearch.node.ProtobufReportingService; import org.opensearch.tasks.ProtobufTask; -import org.opensearch.tasks.ProtobufTaskManager; +import org.opensearch.tasks.TaskManager; import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.ProtobufThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.UncheckedIOException; @@ -79,15 +80,15 @@ public class ProtobufTransportService extends AbstractLifecycleComponent private final AtomicBoolean handleIncomingRequests = new AtomicBoolean(); private final DelegatingTransportMessageListener messageListener = new DelegatingTransportMessageListener(); - protected final ProtobufTransport transport; + protected final Transport transport; protected final ProtobufConnectionManager connectionManager; - protected final ProtobufThreadPool threadPool; + protected final ThreadPool threadPool; protected final ProtobufClusterName clusterName; - protected final ProtobufTaskManager taskManager; + protected final TaskManager taskManager; private final ProtobufTransportInterceptor.AsyncSender asyncSender; private final Function localNodeFactory; private final boolean remoteClusterClient; - private final ProtobufTransport.ResponseHandlers responseHandlers; + private final Transport.ProtobufResponseHandlers responseHandlers; private final ProtobufTransportInterceptor interceptor; // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they @@ -111,11 +112,11 @@ protected boolean removeEldestEntry(Map.Entry eldest) { volatile String[] tracerLogInclude; volatile String[] tracerLogExclude; - private final ProtobufRemoteClusterService remoteClusterService; + private final RemoteClusterService remoteClusterService; /** if set will call requests sent to this id to shortcut and executed locally */ volatile ProtobufDiscoveryNode localNode = null; - private final ProtobufTransport.Connection localNodeConnection = new ProtobufTransport.Connection() { + private final Transport.ProtobufConnection localNodeConnection = new Transport.ProtobufConnection() { @Override public ProtobufDiscoveryNode getNode() { return localNode; @@ -147,8 +148,8 @@ public void close() {} */ public ProtobufTransportService( Settings settings, - ProtobufTransport transport, - ProtobufThreadPool threadPool, + Transport transport, + ThreadPool threadPool, ProtobufTransportInterceptor transportInterceptor, Function localNodeFactory, @Nullable ClusterSettings clusterSettings, 
@@ -168,8 +169,8 @@ public ProtobufTransportService( public ProtobufTransportService( Settings settings, - ProtobufTransport transport, - ProtobufThreadPool threadPool, + Transport transport, + ThreadPool threadPool, ProtobufTransportInterceptor transportInterceptor, Function localNodeFactory, @Nullable ClusterSettings clusterSettings, @@ -189,8 +190,8 @@ public ProtobufTransportService( this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.remoteClusterClient = ProtobufDiscoveryNode.isRemoteClusterClient(settings); - remoteClusterService = new ProtobufRemoteClusterService(settings, this); - responseHandlers = transport.getResponseHandlers(); + remoteClusterService = new RemoteClusterService(settings, this); + responseHandlers = transport.getProtobufResponseHandlers(); if (clusterSettings != null) { clusterSettings.addSettingsUpdateConsumer(TransportSettings.TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); clusterSettings.addSettingsUpdateConsumer(TransportSettings.TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); @@ -201,7 +202,7 @@ public ProtobufTransportService( } registerRequestHandler( HANDSHAKE_ACTION_NAME, - ProtobufThreadPool.Names.SAME, + ThreadPool.Names.SAME, false, false, HandshakeRequest::new, @@ -209,7 +210,7 @@ public ProtobufTransportService( ); } - public ProtobufRemoteClusterService getRemoteClusterService() { + public RemoteClusterService getRemoteClusterService() { return remoteClusterService; } @@ -217,20 +218,20 @@ public ProtobufDiscoveryNode getLocalNode() { return localNode; } - public ProtobufTaskManager getTaskManager() { + public TaskManager getTaskManager() { return taskManager; } - protected ProtobufTaskManager createTaskManager( + protected TaskManager createTaskManager( Settings settings, ClusterSettings clusterSettings, - ProtobufThreadPool threadPool, + ThreadPool threadPool, Set taskHeaders ) { if (clusterSettings != null) { - return ProtobufTaskManager.createTaskManagerWithClusterSettings(settings, clusterSettings, threadPool, taskHeaders); + return TaskManager.createTaskManagerWithClusterSettings(settings, clusterSettings, threadPool, taskHeaders); } else { - return new ProtobufTaskManager(settings, threadPool, taskHeaders); + return new TaskManager(settings, threadPool, taskHeaders); } } @@ -258,11 +259,11 @@ protected void doStart() { transport.start(); if (transport.boundAddress() != null && logger.isInfoEnabled()) { logger.info("{}", transport.boundAddress()); - for (Map.Entry entry : transport.profileBoundAddresses().entrySet()) { + for (Map.Entry entry : transport.profileProtobufBoundAddresses().entrySet()) { logger.info("profile [{}]: {}", entry.getKey(), entry.getValue()); } } - localNode = localNodeFactory.apply(transport.boundAddress()); + localNode = localNodeFactory.apply(transport.boundProtobufAddress()); if (remoteClusterClient) { // here we start to connect to the remote clusters @@ -279,7 +280,7 @@ protected void doStop() { } finally { // in case the transport is not connected to our local node (thus cleaned on node disconnect) // make sure to clean any leftover on going handles - for (final ProtobufTransport.ResponseContext holderToNotify : responseHandlers.prune(h -> true)) { + for (final Transport.ProtobufResponseContext holderToNotify : responseHandlers.prune(h -> true)) { // callback that an exception happened, but on a different thread since we don't // want handlers to worry about stack overflows getExecutorService().execute(new AbstractRunnable() { @@ 
-335,16 +336,16 @@ public final void acceptIncomingRequests() { } @Override - public ProtobufTransportInfo info() { + public ProtobufTransportInfo protobufInfo() { ProtobufBoundTransportAddress boundTransportAddress = boundAddress(); if (boundTransportAddress == null) { return null; } - return new ProtobufTransportInfo(boundTransportAddress, transport.profileBoundAddresses()); + return new ProtobufTransportInfo(boundTransportAddress, transport.profileProtobufBoundAddresses()); } - public TransportStats stats() { - return transport.getStats(); + public ProtobufTransportStats stats() { + return transport.getProtobufStats(); } public boolean isTransportSecure() { @@ -352,7 +353,7 @@ public boolean isTransportSecure() { } public ProtobufBoundTransportAddress boundAddress() { - return transport.boundAddress(); + return transport.boundProtobufAddress(); } public List getDefaultSeedAddresses() { @@ -474,7 +475,7 @@ public ProtobufConnectionManager.ConnectionValidator extensionConnectionValidato * @param node the node to connect to * @param connectionProfile the connection profile to use */ - public ProtobufTransport.Connection openConnection(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { + public Transport.ProtobufConnection openConnection(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { return PlainActionFuture.get(fut -> openConnection(node, connectionProfile, fut)); } @@ -489,7 +490,7 @@ public ProtobufTransport.Connection openConnection(final ProtobufDiscoveryNode n public void openConnection( final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile, - ActionListener listener + ActionListener listener ) { if (isLocalNode(node)) { listener.onResponse(localNodeConnection); @@ -512,7 +513,7 @@ public void openConnection( * @throws IllegalStateException if the handshake failed */ public void handshake( - final ProtobufTransport.Connection connection, + final Transport.ProtobufConnection connection, final long handshakeTimeout, final ActionListener listener ) { @@ -538,7 +539,7 @@ public void handshake( * @throws IllegalStateException if the handshake failed */ public void handshake( - final ProtobufTransport.Connection connection, + final Transport.ProtobufConnection connection, final long handshakeTimeout, Predicate clusterNamePredicate, final ActionListener listener @@ -584,7 +585,7 @@ public void onResponse(HandshakeResponse response) { public void onFailure(Exception e) { listener.onFailure(e); } - }, HandshakeResponse::new, ProtobufThreadPool.Names.GENERIC) + }, HandshakeResponse::new, ThreadPool.Names.GENERIC) ); } @@ -691,7 +692,7 @@ public TransportFuture submitRequest( ) throws ProtobufTransportException { ProtobufPlainTransportFuture futureHandler = new ProtobufPlainTransportFuture<>(handler); try { - ProtobufTransport.Connection connection = getConnection(node); + Transport.ProtobufConnection connection = getConnection(node); sendRequest(connection, action, request, options, futureHandler); } catch (ProtobufNodeNotConnectedException ex) { // the caller might not handle this so we invoke the handler @@ -706,7 +707,7 @@ public void sendRequest( final ProtobufTransportRequest request, final ProtobufTransportResponseHandler handler ) { - final ProtobufTransport.Connection connection; + final Transport.ProtobufConnection connection; try { connection = getConnection(node); } catch (final ProtobufNodeNotConnectedException ex) { @@ -724,7 +725,7 @@ public final void sendRequest( final TransportRequestOptions 
options, ProtobufTransportResponseHandler handler ) { - final ProtobufTransport.Connection connection; + final Transport.ProtobufConnection connection; try { connection = getConnection(node); } catch (final ProtobufNodeNotConnectedException ex) { @@ -746,7 +747,7 @@ public final void sendRequest( * @param the type of the transport response */ public final void sendRequest( - final ProtobufTransport.Connection connection, + final Transport.ProtobufConnection connection, final String action, final ProtobufTransportRequest request, final TransportRequestOptions options, @@ -757,7 +758,7 @@ public final void sendRequest( final ProtobufTransportResponseHandler delegate; if (request.getParentTask().isSet()) { // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. - final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode()); + final Releasable unregisterChildNode = taskManager.registerProtobufChildNode(request.getParentTask().getId(), connection.getNode()); delegate = new ProtobufTransportResponseHandler() { @Override public void handleResponse(T response) { @@ -806,7 +807,7 @@ public String toString() { * Returns either a real transport connection or a local node connection if we are using the local node optimization. * @throws ProtobufNodeNotConnectedException if the given node is not connected */ - public ProtobufTransport.Connection getConnection(ProtobufDiscoveryNode node) { + public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) { if (isLocalNode(node)) { return localNodeConnection; } else { @@ -822,7 +823,7 @@ public final void sendChildRequest( final TransportRequestOptions options, final ProtobufTransportResponseHandler handler ) { - final ProtobufTransport.Connection connection; + final Transport.ProtobufConnection connection; try { connection = getConnection(node); } catch (final ProtobufNodeNotConnectedException ex) { @@ -834,7 +835,7 @@ public final void sendChildRequest( } public void sendChildRequest( - final ProtobufTransport.Connection connection, + final Transport.ProtobufConnection connection, final String action, final ProtobufTransportRequest request, final ProtobufTask parentTask, @@ -844,7 +845,7 @@ public void sendChildRequest( } public void sendChildRequest( - final ProtobufTransport.Connection connection, + final Transport.ProtobufConnection connection, final String action, final ProtobufTransportRequest request, final ProtobufTask parentTask, @@ -856,7 +857,7 @@ public void sendChildRequest( } private void sendRequestInternal( - final ProtobufTransport.Connection connection, + final Transport.ProtobufConnection connection, final String action, final ProtobufTransportRequest request, final TransportRequestOptions options, @@ -870,7 +871,7 @@ private void sendRequestInternal( Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); ContextRestoreResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); // TODO we can probably fold this entire request ID dance into connection.sendReqeust but it will be a bigger refactoring - final long requestId = responseHandlers.add(new ProtobufTransport.ResponseContext<>(responseHandler, connection, action)); + final long requestId = responseHandlers.add(new Transport.ProtobufResponseContext<>(responseHandler, connection, action)); final TimeoutHandler timeoutHandler; if (options.timeout() != null) { timeoutHandler = new 
TimeoutHandler(requestId, connection.getNode(), action); @@ -894,7 +895,7 @@ private void sendRequestInternal( } catch (final Exception e) { // usually happen either because we failed to connect to the node // or because we failed serializing the message - final ProtobufTransport.ResponseContext contextToNotify = responseHandlers.remove( + final Transport.ProtobufResponseContext contextToNotify = responseHandlers.remove( requestId ); // If holderToNotify == null then handler has already been taken care of. @@ -910,7 +911,7 @@ private void sendRequestInternal( action, e ); - final String executor = lifecycle.stoppedOrClosed() ? ProtobufThreadPool.Names.SAME : ProtobufThreadPool.Names.GENERIC; + final String executor = lifecycle.stoppedOrClosed() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; threadPool.executor(executor).execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { @@ -961,7 +962,7 @@ private void sendLocalRequest( throw new ActionNotFoundTransportException("Action [" + action + "] not found"); } final String executor = reg.getExecutor(); - if (ProtobufThreadPool.Names.SAME.equals(executor)) { + if (ThreadPool.Names.SAME.equals(executor)) { // noinspection unchecked reg.processMessageReceived(request, channel); } else { @@ -1024,7 +1025,7 @@ public static boolean shouldTraceAction(String action, String[] include, String[ } public ProtobufTransportAddress[] addressesFromString(String address) throws UnknownHostException { - return transport.addressesFromString(address); + return transport.addressesFromStringProtobuf(address); } /** @@ -1095,7 +1096,7 @@ public void registerRequestHandler( false, true ); - transport.registerRequestHandler(reg); + transport.registerProtobufRequestHandler(reg); } /** @@ -1127,11 +1128,11 @@ public void registerRequestHandler( forceExecution, canTripCircuitBreaker ); - transport.registerRequestHandler(reg); + transport.registerProtobufRequestHandler(reg); } /** - * called by the {@link ProtobufTransport} implementation when an incoming request arrives but before + * called by the {@link Transport} implementation when an incoming request arrives but before * any parsing of it has happened (with the exception of the requestId and action) */ @Override @@ -1145,7 +1146,7 @@ public void onRequestReceived(long requestId, String action) { messageListener.onRequestReceived(requestId, action); } - /** called by the {@link ProtobufTransport} implementation once a request has been sent */ + /** called by the {@link Transport} implementation once a request has been sent */ @Override public void onRequestSent( ProtobufDiscoveryNode node, @@ -1161,7 +1162,7 @@ public void onRequestSent( } @Override - public void onResponseReceived(long requestId, ProtobufTransport.ResponseContext holder) { + public void onResponseReceived(long requestId, Transport.ProtobufResponseContext holder) { if (holder == null) { checkForTimeout(requestId); } else if (tracerLog.isTraceEnabled() && shouldTraceAction(holder.action())) { @@ -1170,7 +1171,7 @@ public void onResponseReceived(long requestId, ProtobufTransport.ResponseContext messageListener.onResponseReceived(requestId, holder); } - /** called by the {@link ProtobufTransport} implementation once a response was sent to calling node */ + /** called by the {@link Transport} implementation once a response was sent to calling node */ @Override public void onResponseSent(long requestId, String action, ProtobufTransportResponse response) { if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { @@ 
-1179,7 +1180,7 @@ public void onResponseSent(long requestId, String action, ProtobufTransportRespo messageListener.onResponseSent(requestId, action, response); } - /** called by the {@link ProtobufTransport} implementation after an exception was sent as a response to an incoming request */ + /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */ @Override public void onResponseSent(long requestId, String action, Exception e) { if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { @@ -1189,7 +1190,7 @@ public void onResponseSent(long requestId, String action, Exception e) { } public ProtobufRequestHandlerRegistry getRequestHandler(String action) { - return transport.getRequestHandlers().getHandler(action); + return transport.getProtobufRequestHandlers().getHandler(action); } private void checkForTimeout(long requestId) { @@ -1229,9 +1230,9 @@ private void checkForTimeout(long requestId) { } @Override - public void onConnectionClosed(ProtobufTransport.Connection connection) { + public void onConnectionClosed(Transport.ProtobufConnection connection) { try { - List> pruned = responseHandlers.prune( + List> pruned = responseHandlers.prune( h -> h.connection().getCacheKey().equals(connection.getCacheKey()) ); // callback that an exception happened, but on a different thread since we don't @@ -1239,7 +1240,7 @@ public void onConnectionClosed(ProtobufTransport.Connection connection) { getExecutorService().execute(new Runnable() { @Override public void run() { - for (ProtobufTransport.ResponseContext holderToNotify : pruned) { + for (Transport.ProtobufResponseContext holderToNotify : pruned) { holderToNotify.handler() .handleException(new ProtobufNodeDisconnectedException(connection.getNode(), holderToNotify.action())); } @@ -1275,7 +1276,7 @@ public void run() { long timeoutTime = threadPool.relativeTimeInMillis(); timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(node, action, sentTime, timeoutTime)); // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id - final ProtobufTransport.ResponseContext holder = responseHandlers.remove(requestId); + final Transport.ProtobufResponseContext holder = responseHandlers.remove(requestId); if (holder != null) { assert holder.action().equals(action); assert holder.connection().getNode().equals(node); @@ -1313,7 +1314,7 @@ public String toString() { } private void scheduleTimeout(TimeValue timeout) { - this.cancellable = threadPool.schedule(this, timeout, ProtobufThreadPool.Names.GENERIC); + this.cancellable = threadPool.schedule(this, timeout, ThreadPool.Names.GENERIC); } } @@ -1424,14 +1425,14 @@ static class DirectResponseChannel implements ProtobufTransportChannel { private final String action; private final long requestId; final ProtobufTransportService service; - final ProtobufThreadPool threadPool; + final ThreadPool threadPool; DirectResponseChannel( ProtobufDiscoveryNode localNode, String action, long requestId, ProtobufTransportService service, - ProtobufThreadPool threadPool + ThreadPool threadPool ) { this.localNode = localNode; this.action = action; @@ -1452,7 +1453,7 @@ public void sendResponse(ProtobufTransportResponse response) throws IOException // ignore if its null, the service logs it if (handler != null) { final String executor = handler.executor(); - if (ProtobufThreadPool.Names.SAME.equals(executor)) { + if (ThreadPool.Names.SAME.equals(executor)) { processResponse(handler, response); } else { 
threadPool.executor(executor).execute(new Runnable() { @@ -1487,7 +1488,7 @@ public void sendResponse(Exception exception) throws IOException { if (handler != null) { final ProtobufRemoteTransportException rtx = wrapInRemote(exception); final String executor = handler.executor(); - if (ProtobufThreadPool.Names.SAME.equals(executor)) { + if (ThreadPool.Names.SAME.equals(executor)) { processException(handler, rtx); } else { threadPool.executor(handler.executor()).execute(new Runnable() { @@ -1537,7 +1538,7 @@ public Version getVersion() { /** * Returns the internal thread pool */ - public ProtobufThreadPool getThreadPool() { + public ThreadPool getThreadPool() { return threadPool; } @@ -1584,7 +1585,7 @@ public void onRequestSent( } @Override - public void onResponseReceived(long requestId, ProtobufTransport.ResponseContext holder) { + public void onResponseReceived(long requestId, Transport.ProtobufResponseContext holder) { for (ProtobufTransportMessageListener listener : listeners) { listener.onResponseReceived(requestId, holder); } diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java index 60e166a4e300c..ae4a98b04fdef 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java @@ -39,8 +39,10 @@ import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.Client; +import org.opensearch.client.ProtobufClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -150,10 +152,21 @@ public boolean isEnabled() { private final TransportService transportService; private final Map remoteClusters = ConcurrentCollections.newConcurrentMap(); + private final ProtobufTransportService protobufTransportService; + private final Map remoteClustersProtobuf = ConcurrentCollections.newConcurrentMap(); + RemoteClusterService(Settings settings, TransportService transportService) { super(settings); this.enabled = DiscoveryNode.isRemoteClusterClient(settings); this.transportService = transportService; + this.protobufTransportService = null; + } + + RemoteClusterService(Settings settings, ProtobufTransportService transportService) { + super(settings); + this.enabled = ProtobufDiscoveryNode.isRemoteClusterClient(settings); + this.protobufTransportService = transportService; + this.transportService = null; } /** @@ -163,13 +176,20 @@ public boolean isCrossClusterSearchEnabled() { return remoteClusters.isEmpty() == false; } + /** + * Returns true if at least one remote cluster is configured + */ + public boolean isCrossClusterSearchEnabledProtobuf() { + return remoteClustersProtobuf.isEmpty() == false; + } + boolean isRemoteNodeConnected(final String remoteCluster, final DiscoveryNode node) { return remoteClusters.get(remoteCluster).isNodeConnected(node); } public Map groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate indexExists) { Map originalIndicesMap = new HashMap<>(); - if (isCrossClusterSearchEnabled()) { + if (isCrossClusterSearchEnabled() || isCrossClusterSearchEnabledProtobuf()) { final Map> groupedIndices = groupClusterIndices(getRemoteClusterNames(), 
indices, indexExists); if (groupedIndices.isEmpty()) { // search on _all in the local cluster if neither local indices nor remote indices were specified @@ -211,6 +231,15 @@ public Transport.Connection getConnection(DiscoveryNode node, String cluster) { return getRemoteClusterConnection(cluster).getConnection(node); } + /** + * Returns a connection to the given node on the given remote cluster + * + * @throws IllegalArgumentException if the remote cluster is unknown + */ + public Transport.ProtobufConnection getConnectionProtobuf(ProtobufDiscoveryNode node, String cluster) { + return getRemoteClusterConnectionProtobuf(cluster).getConnection(node); + } + /** * Ensures that the given cluster alias is connected. If the cluster is connected this operation * will invoke the listener immediately. @@ -230,6 +259,10 @@ public Transport.Connection getConnection(String cluster) { return getRemoteClusterConnection(cluster).getConnection(); } + public Transport.ProtobufConnection getConnectionProtobuf(String cluster) { + return getRemoteClusterConnectionProtobuf(cluster).getConnection(); + } + RemoteClusterConnection getRemoteClusterConnection(String cluster) { if (enabled == false) { throw new IllegalArgumentException( @@ -243,6 +276,19 @@ RemoteClusterConnection getRemoteClusterConnection(String cluster) { return connection; } + ProtobufRemoteClusterConnection getRemoteClusterConnectionProtobuf(String cluster) { + if (enabled == false) { + throw new IllegalArgumentException( + "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role" + ); + } + ProtobufRemoteClusterConnection connection = remoteClustersProtobuf.get(cluster); + if (connection == null) { + throw new NoSuchRemoteClusterException(cluster); + } + return connection; + } + Set getRemoteClusterNames() { return this.remoteClusters.keySet(); } @@ -418,6 +464,25 @@ public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) return new RemoteClusterAwareClient(settings, threadPool, transportService, clusterAlias); } + /** + * Returns a client to the remote cluster if the given cluster alias exists. 
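+ * A typical call might look like (sketch; receiver and alias are illustrative):
+ * <pre>
+ * ProtobufClient remote = remoteClusterService.getRemoteClusterClientProtobuf(threadPool, "cluster_one");
+ * </pre>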
+ * + * @param threadPool the {@link ThreadPool} for the client + * @param clusterAlias the cluster alias the remote cluster is registered under + * @throws IllegalArgumentException if the given clusterAlias doesn't exist + */ + public ProtobufClient getRemoteClusterClientProtobuf(ThreadPool threadPool, String clusterAlias) { + if (protobufTransportService.getRemoteClusterService().isEnabled() == false) { + throw new IllegalArgumentException( + "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role" + ); + } + if (protobufTransportService.getRemoteClusterService().getRemoteClusterNames().contains(clusterAlias) == false) { + throw new NoSuchRemoteClusterException(clusterAlias); + } + return new ProtobufRemoteClusterAwareClient(settings, threadPool, protobufTransportService, clusterAlias); + } + Collection getConnections() { return remoteClusters.values(); } diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 8733cb5fe7e8e..a72ba2b78fef6 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -39,6 +39,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.Booleans; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.bytes.BytesArray; @@ -56,6 +57,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.transport.PortsRange; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; @@ -149,6 +152,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final InboundHandler inboundHandler; private final ResponseHandlers responseHandlers = new ResponseHandlers(); private final RequestHandlers requestHandlers = new RequestHandlers(); + private final ProtobufResponseHandlers protobufResponseHandlers = new ProtobufResponseHandlers(); + private final ProtobufRequestHandlers protobufRequestHandlers = new ProtobufRequestHandlers(); private final AtomicLong outboundConnectionCount = new AtomicLong(); // also used as a correlation ID for open/close logs @@ -1015,6 +1020,53 @@ public final RequestHandlers getRequestHandlers() { return requestHandlers; } + @Override + public void setMessageListener(ProtobufTransportMessageListener listener) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'setMessageListener'"); + } + + @Override + public ProtobufBoundTransportAddress boundProtobufAddress() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'boundProtobufAddress'"); + } + + @Override + public Map profileProtobufBoundAddresses() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'profileProtobufBoundAddresses'"); + } + + @Override + public ProtobufTransportAddress[] addressesFromStringProtobuf(String address) throws UnknownHostException { 
+ // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'addressesFromStringProtobuf'"); + } + + @Override + public void openProtobufConnection(ProtobufDiscoveryNode node, ProtobufConnectionProfile profile, + ActionListener listener) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'openProtobufConnection'"); + } + + @Override + public ProtobufTransportStats getProtobufStats() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'getProtobufStats'"); + } + + @Override + public ProtobufResponseHandlers getProtobufResponseHandlers() { + return protobufResponseHandlers; + } + + @Override + public ProtobufRequestHandlers getProtobufRequestHandlers() { + return protobufRequestHandlers; + } + private final class ChannelsConnectedListener implements ActionListener { private final DiscoveryNode node; diff --git a/server/src/main/java/org/opensearch/transport/Transport.java b/server/src/main/java/org/opensearch/transport/Transport.java index 3bf855f847685..079934562a03d 100644 --- a/server/src/main/java/org/opensearch/transport/Transport.java +++ b/server/src/main/java/org/opensearch/transport/Transport.java @@ -35,9 +35,12 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; @@ -69,6 +72,15 @@ default void registerRequestHandler(RequestHa void setMessageListener(TransportMessageListener listener); + /** + * Registers a new request handler + */ + default void registerProtobufRequestHandler(ProtobufRequestHandlerRegistry reg) { + getProtobufRequestHandlers().registerHandler(reg); + } + + void setMessageListener(ProtobufTransportMessageListener listener); + default void setSlowLogThreshold(TimeValue slowLogThreshold) {} default boolean isSecure() { @@ -91,6 +103,23 @@ default boolean isSecure() { */ TransportAddress[] addressesFromString(String address) throws UnknownHostException; + /** + * The address the transport is bound on. + */ + ProtobufBoundTransportAddress boundProtobufAddress(); + + /** + * Further profile bound addresses + * @return null iff profiles are unsupported, otherwise a map with name of profile and its bound transport address + */ + Map profileProtobufBoundAddresses(); + + /** + * Returns an address from its string representation. + */ + ProtobufTransportAddress[] addressesFromStringProtobuf(String address) throws UnknownHostException; + + /** * Returns a list of all local addresses for this transport */ @@ -102,12 +131,28 @@ default boolean isSecure() { */ void openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener listener); + /** + * Opens a new connection to the given node. When the connection is fully connected, the listener is called. + * The ActionListener will be called on the calling thread or the generic thread pool. 
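+ * A caller might use it as (sketch; assumes a logger in scope and uses {@code ActionListener.wrap}):
+ * <pre>
+ * transport.openProtobufConnection(node, profile, ActionListener.wrap(
+ *     connection -> logger.info("connected to {}", connection.getNode()),
+ *     e -> logger.warn("connection to {} failed", node, e)));
+ * </pre>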
+ */ + void openProtobufConnection( + ProtobufDiscoveryNode node, + ProtobufConnectionProfile profile, + ActionListener listener + ); + TransportStats getStats(); ResponseHandlers getResponseHandlers(); RequestHandlers getRequestHandlers(); + ProtobufTransportStats getProtobufStats(); + + ProtobufResponseHandlers getProtobufResponseHandlers(); + + ProtobufRequestHandlers getProtobufRequestHandlers(); + /** * A unidirectional connection to a {@link DiscoveryNode} */ @@ -158,6 +203,56 @@ default Object getCacheKey() { void close(); } + /** + * A unidirectional connection to a {@link ProtobufDiscoveryNode} + */ + interface ProtobufConnection extends Closeable { + /** + * The node this connection is associated with + */ + ProtobufDiscoveryNode getNode(); + + /** + * Sends the request to the node this connection is associated with + * @param requestId see {@link ResponseHandlers#add(ResponseContext)} for details + * @param action the action to execute + * @param request the request to send + * @param options request options to apply + * @throws NodeNotConnectedException if the given node is not connected + */ + void sendRequest(long requestId, String action, ProtobufTransportRequest request, TransportRequestOptions options) + throws IOException, TransportException; + + /** + * The listener's {@link ActionListener#onResponse(Object)} method will be called when this + * connection is closed. No implementations currently throw an exception during close, so + * {@link ActionListener#onFailure(Exception)} will not be called. + * + * @param listener to be called + */ + void addCloseListener(ActionListener listener); + + boolean isClosed(); + + /** + * Returns the version of the node this connection was established with. + */ + default Version getVersion() { + return getNode().getVersion(); + } + + /** + * Returns a key that this connection can be cached on. Delegating subclasses must delegate method call to + * the original connection. + */ + default Object getCacheKey() { + return this; + } + + @Override + void close(); + } + /** * This class represents a response context that encapsulates the actual response handler, the action and the connection it was * executed on. @@ -189,6 +284,37 @@ public String action() { } } + /** + * This class represents a response context that encapsulates the actual response handler, the action and the connection it was + * executed on. + */ + final class ProtobufResponseContext { + + private final ProtobufTransportResponseHandler handler; + + private final ProtobufConnection connection; + + private final String action; + + ProtobufResponseContext(ProtobufTransportResponseHandler handler, ProtobufConnection connection, String action) { + this.handler = handler; + this.connection = connection; + this.action = action; + } + + public ProtobufTransportResponseHandler handler() { + return handler; + } + + public ProtobufConnection connection() { + return this.connection; + } + + public String action() { + return this.action; + } + } + /** * This class is a registry that allows */ @@ -268,6 +394,87 @@ public TransportResponseHandler onResponseReceived( } } + /** + * This class is a registry that allows + */ + final class ProtobufResponseHandlers { + private final ConcurrentMapLong> handlers = ConcurrentCollections + .newConcurrentMapLongWithAggressiveConcurrency(); + private final AtomicLong requestIdGenerator = new AtomicLong(); + + /** + * Returns true if the give request ID has a context associated with it. 
+ */ + public boolean contains(long requestId) { + return handlers.containsKey(requestId); + } + + /** + * Removes and return the {@link ResponseContext} for the given request ID or returns + * null if no context is associated with this request ID. + */ + public ProtobufResponseContext remove(long requestId) { + return handlers.remove(requestId); + } + + /** + * Adds a new response context and associates it with a new request ID. + * @return the new request ID + * @see ProtobufConnection#sendRequest(long, String, ProtobufTransportRequest, TransportRequestOptions) + */ + public long add(ProtobufResponseContext holder) { + long requestId = newRequestId(); + ProtobufResponseContext existing = handlers.put(requestId, holder); + assert existing == null : "request ID already in use: " + requestId; + return requestId; + } + + /** + * Returns a new request ID to use when sending a message via {@link ProtobufConnection#sendRequest(long, String, + * ProtobufTransportRequest, TransportRequestOptions)} + */ + long newRequestId() { + return requestIdGenerator.incrementAndGet(); + } + + /** + * Removes and returns all {@link ResponseContext} instances that match the predicate + */ + public List> prune( + Predicate> predicate + ) { + final List> holders = new ArrayList<>(); + for (Map.Entry> entry : handlers.entrySet()) { + ProtobufResponseContext holder = entry.getValue(); + if (predicate.test(holder)) { + ProtobufResponseContext remove = handlers.remove(entry.getKey()); + if (remove != null) { + holders.add(holder); + } + } + } + return holders; + } + + /** + * called by the {@link Transport} implementation when a response or an exception has been received for a previously + * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not + * found. + */ + public ProtobufTransportResponseHandler onResponseReceived( + final long requestId, + final ProtobufTransportMessageListener listener + ) { + ProtobufResponseContext context = handlers.remove(requestId); + listener.onResponseReceived(requestId, context); + if (context == null) { + return null; + } else { + return context.handler(); + } + } + } + /** * Request handler implementations * @@ -277,6 +484,9 @@ final class RequestHandlers { private volatile Map> requestHandlers = Collections.emptyMap(); + private volatile Map> protobufRequestHandlers = Collections + .emptyMap(); + synchronized void registerHandler(RequestHandlerRegistry reg) { if (requestHandlers.containsKey(reg.getAction())) { throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); @@ -295,4 +505,33 @@ public RequestHandlerRegistry getHandler(String return (RequestHandlerRegistry) requestHandlers.get(action); } } + + /** + * Request handler implementations + * + * @opensearch.internal + */ + final class ProtobufRequestHandlers { + + private volatile Map> requestHandlers = Collections + .emptyMap(); + + synchronized void registerHandler(ProtobufRequestHandlerRegistry reg) { + if (requestHandlers.containsKey(reg.getAction())) { + throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); + } + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + + // TODO: Only visible for testing. 
Perhaps move StubbableTransport from + // org.opensearch.test.transport to org.opensearch.transport + public synchronized void forceRegister(ProtobufRequestHandlerRegistry reg) { + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + + @SuppressWarnings("unchecked") + public ProtobufRequestHandlerRegistry getHandler(String action) { + return (ProtobufRequestHandlerRegistry) requestHandlers.get(action); + } + } } diff --git a/server/src/main/java/org/opensearch/usage/UsageService.java b/server/src/main/java/org/opensearch/usage/UsageService.java index 14e25b591bda0..cffdb99b802eb 100644 --- a/server/src/main/java/org/opensearch/usage/UsageService.java +++ b/server/src/main/java/org/opensearch/usage/UsageService.java @@ -53,6 +53,7 @@ import org.opensearch.action.admin.cluster.node.usage.NodeUsage; import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.ProtobufBaseRestHandler; import java.util.HashMap; import java.util.Locale; @@ -67,9 +68,11 @@ public class UsageService { private final Map handlers; + private final Map protobufHandlers; public UsageService() { this.handlers = new HashMap<>(); + this.protobufHandlers = new HashMap<>(); } /** @@ -100,6 +103,34 @@ public void addRestHandler(BaseRestHandler handler) { } } + /** + * Add a REST handler to this service. + * + * @param handler the {@link ProtobufBaseRestHandler} to add to the usage service. + */ + public void addProtobufRestHandler(ProtobufBaseRestHandler handler) { + Objects.requireNonNull(handler); + if (handler.getName() == null) { + throw new IllegalArgumentException("handler of type [" + handler.getClass().getName() + "] does not have a name"); + } + final ProtobufBaseRestHandler maybeHandler = protobufHandlers.put(handler.getName(), handler); + /* + * Handlers will be registered multiple times, once for each route that the handler handles. This means that we will see handlers + * multiple times, so we do not have a conflict if we are seeing the same instance multiple times. So, we only reject if a handler + * with the same name was registered before, and it is not the same instance as before. + */ + if (maybeHandler != null && maybeHandler != handler) { + final String message = String.format( + Locale.ROOT, + "handler of type [%s] conflicts with handler of type [%s] as they both have the same name [%s]", + handler.getClass().getName(), + maybeHandler.getClass().getName(), + handler.getName() + ); + throw new IllegalArgumentException(message); + } + } + /** * Get the current usage statistics for this node. 
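The name-based de-duplication in addProtobufRestHandler above amounts to the following (editorial sketch; handlerA and handlerB are hypothetical ProtobufBaseRestHandler instances that report the same name):

    usageService.addProtobufRestHandler(handlerA); // first registration: stored under its name
    usageService.addProtobufRestHandler(handlerA); // same instance again, e.g. for a second route: accepted
    usageService.addProtobufRestHandler(handlerB); // different instance with the same name: IllegalArgumentException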
* diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index ea0ce54913a8d..25a8637479ec5 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -228,7 +228,7 @@ public void testRegisterAsDeprecatedHandler() { controller.registerAsDeprecatedHandler(method, path, handler, deprecationMessage); - verify(controller).registerHandler(eq(method), eq(path), any(DeprecationRestHandler.class)); + verify(controller).registerProtobufHandler(eq(method), eq(path), any(DeprecationRestHandler.class)); } public void testRegisterWithDeprecatedHandler() { @@ -255,7 +255,7 @@ public void testRegisterWithDeprecatedHandler() { controller.registerWithDeprecatedHandler(method, path, handler, deprecatedMethod, deprecatedPath); - verify(controller).registerHandler(method, path, handler); + verify(controller).registerProtobufHandler(method, path, handler); verify(controller).registerAsDeprecatedHandler(deprecatedMethod, deprecatedPath, handler, deprecationMessage); } diff --git a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java index 4d59afd5f99ed..b70a143cff948 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java @@ -34,17 +34,24 @@ import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.transport.CloseableConnection; import org.opensearch.transport.ConnectionProfile; +import org.opensearch.transport.ProtobufConnectionProfile; +import org.opensearch.transport.ProtobufTransportMessageListener; +import org.opensearch.transport.ProtobufTransportStats; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportMessageListener; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportStats; +import java.net.UnknownHostException; import java.util.Collections; import java.util.List; import java.util.Map; @@ -130,4 +137,53 @@ protected void doStop() { protected void doClose() { } + + @Override + public void setMessageListener(ProtobufTransportMessageListener listener) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'setMessageListener'"); + } + + @Override + public ProtobufBoundTransportAddress boundProtobufAddress() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'boundProtobufAddress'"); + } + + @Override + public Map profileProtobufBoundAddresses() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'profileProtobufBoundAddresses'"); + } + + @Override + public ProtobufTransportAddress[] addressesFromStringProtobuf(String address) throws UnknownHostException { + // TODO Auto-generated method stub + 
throw new UnsupportedOperationException("Unimplemented method 'addressesFromStringProtobuf'"); + } + + @Override + public void openProtobufConnection(ProtobufDiscoveryNode node, ProtobufConnectionProfile profile, + ActionListener listener) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'openProtobufConnection'"); + } + + @Override + public ProtobufTransportStats getProtobufStats() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'getProtobufStats'"); + } + + @Override + public ProtobufResponseHandlers getProtobufResponseHandlers() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'getProtobufResponseHandlers'"); + } + + @Override + public ProtobufRequestHandlers getProtobufRequestHandlers() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'getProtobufRequestHandlers'"); + } } diff --git a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java index 8d66d481dc4aa..9770e9e93d1e6 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java @@ -35,12 +35,18 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.ProtobufDiscoveryNode; import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.tasks.Task; import org.opensearch.transport.ConnectionProfile; +import org.opensearch.transport.ProtobufConnectionProfile; +import org.opensearch.transport.ProtobufTransportMessageListener; +import org.opensearch.transport.ProtobufTransportStats; import org.opensearch.transport.RequestHandlerRegistry; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportChannel; @@ -310,4 +316,53 @@ void messageReceived(TransportRequestHandler handler, Request request, default void clearCallback() {} } + + @Override + public void setMessageListener(ProtobufTransportMessageListener listener) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'setMessageListener'"); + } + + @Override + public ProtobufBoundTransportAddress boundProtobufAddress() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'boundProtobufAddress'"); + } + + @Override + public Map profileProtobufBoundAddresses() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'profileProtobufBoundAddresses'"); + } + + @Override + public ProtobufTransportAddress[] addressesFromStringProtobuf(String address) throws UnknownHostException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'addressesFromStringProtobuf'"); + } + + @Override + public void openProtobufConnection(ProtobufDiscoveryNode node, ProtobufConnectionProfile profile, + ActionListener listener) { 
+        // TODO Auto-generated method stub
+        throw new UnsupportedOperationException("Unimplemented method 'openProtobufConnection'");
+    }
+
+    @Override
+    public ProtobufTransportStats getProtobufStats() {
+        // TODO Auto-generated method stub
+        throw new UnsupportedOperationException("Unimplemented method 'getProtobufStats'");
+    }
+
+    @Override
+    public ProtobufResponseHandlers getProtobufResponseHandlers() {
+        // TODO Auto-generated method stub
+        throw new UnsupportedOperationException("Unimplemented method 'getProtobufResponseHandlers'");
+    }
+
+    @Override
+    public ProtobufRequestHandlers getProtobufRequestHandlers() {
+        // TODO Auto-generated method stub
+        throw new UnsupportedOperationException("Unimplemented method 'getProtobufRequestHandlers'");
+    }
 }

From a5ce258ae0b44a779e0cdc77749763a8d3b69d7f Mon Sep 17 00:00:00 2001
From: Vacha Shah
Date: Wed, 21 Jun 2023 23:14:33 +0000
Subject: [PATCH 10/37] Fixes

Signed-off-by: Vacha Shah

---
 ...ProtobufNodeAndClusterIdStateListener.java | 68 +++++++++++++++++++
 .../main/java/org/opensearch/node/Node.java   | 47 +++++++++++++
 2 files changed, 115 insertions(+)
 create mode 100644 server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java

diff --git a/server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java b/server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java
new file mode 100644
index 0000000000000..4b1be0914747d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.logging;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.cluster.ProtobufClusterState;
+import org.opensearch.cluster.ProtobufClusterStateObserver;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.concurrent.ThreadContext;
+
+/**
+ * The {@link ProtobufNodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update
+ * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter}.
+ * Once the first update is received, it will automatically be de-registered from subsequent updates.
+ *
+ * @opensearch.internal
+ */
+public class ProtobufNodeAndClusterIdStateListener implements ProtobufClusterStateObserver.Listener {
+    private static final Logger logger = LogManager.getLogger(ProtobufNodeAndClusterIdStateListener.class);
+
+    private ProtobufNodeAndClusterIdStateListener() {}
+
+    /**
+     * Subscribes for the first cluster state update where nodeId and clusterId is present
+     * and sets these values in {@link NodeAndClusterIdConverter}.
+     */
+    public static void getAndSetNodeIdAndClusterId(ClusterService clusterService, ThreadContext threadContext) {
+        ProtobufClusterState clusterState = clusterService.protobufState();
+        ProtobufClusterStateObserver observer = new ProtobufClusterStateObserver(clusterState, clusterService, null, logger, threadContext);
+
+        observer.waitForNextChange(new ProtobufNodeAndClusterIdStateListener(), ProtobufNodeAndClusterIdStateListener::isNodeAndClusterIdPresent);
+    }
+
+    private static boolean isNodeAndClusterIdPresent(ProtobufClusterState clusterState) {
+        return getNodeId(clusterState) != null && getClusterUUID(clusterState) != null;
+    }
+
+    private static String getClusterUUID(ProtobufClusterState state) {
+        return state.getMetadata().clusterUUID();
+    }
+
+    private static String getNodeId(ProtobufClusterState state) {
+        return state.getNodes().getLocalNodeId();
+    }
+
+    @Override
+    public void onNewClusterState(ProtobufClusterState state) {
+        String nodeId = getNodeId(state);
+        String clusterUUID = getClusterUUID(state);
+
+        logger.debug("Received cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID);
+        NodeAndClusterIdConverter.setNodeIdAndClusterId(nodeId, clusterUUID);
+    }
+
+    @Override
+    public void onClusterServiceClose() {}
+
+    @Override
+    public void onTimeout(TimeValue timeout) {}
+}
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index e77e82368bd9b..60c0d4c75879b 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -101,6 +101,8 @@
 import org.opensearch.cluster.ClusterStateObserver;
 import org.opensearch.cluster.InternalClusterInfoService;
 import org.opensearch.cluster.NodeConnectionsService;
+import org.opensearch.cluster.ProtobufClusterState;
+import org.opensearch.cluster.ProtobufClusterStateObserver;
 import org.opensearch.cluster.action.index.MappingUpdatedAction;
 import org.opensearch.cluster.metadata.AliasValidator;
 import org.opensearch.cluster.metadata.IndexTemplateMetadata;
@@ -129,6 +131,7 @@
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.logging.HeaderWarning;
 import org.opensearch.common.logging.NodeAndClusterIdStateListener;
+import org.opensearch.common.logging.ProtobufNodeAndClusterIdStateListener;
 import org.opensearch.common.network.NetworkAddress;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.network.NetworkService;
@@ -225,6 +228,7 @@
 import org.opensearch.snapshots.SnapshotShardsService;
 import org.opensearch.snapshots.SnapshotsInfoService;
 import org.opensearch.snapshots.SnapshotsService;
+import org.opensearch.tasks.ProtobufTaskCancellationService;
 import org.opensearch.tasks.Task;
 import org.opensearch.tasks.TaskCancellationService;
 import org.opensearch.tasks.TaskResultsService;
@@ -1402,8 +1406,13 @@ public Node start() throws NodeValidationException {
         transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class));
         transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService));
 
+        ProtobufTransportService protobufTransportService = injector.getInstance(ProtobufTransportService.class);
+        protobufTransportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class));
+        protobufTransportService.getTaskManager().setTaskCancellationService(new ProtobufTaskCancellationService(protobufTransportService));
+
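// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: the listener registered by
// ProtobufNodeAndClusterIdStateListener above is one-shot -- it waits for the
// first cluster state whose node id and cluster UUID are both present, then
// stops listening. Assuming ProtobufClusterStateObserver mirrors the
// semantics of the existing ClusterStateObserver, the pattern reduces to the
// stand-alone class below; all names here are illustrative, not from the PR.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;

final class OneShotStateObserver<S> {
    private final CountDownLatch done = new CountDownLatch(1);
    private volatile S firstMatch;

    // Called by the state publisher (the cluster applier in the patch) on
    // every update; only the first state matching the predicate is recorded.
    void onNewState(S state, Predicate<S> predicate) {
        if (firstMatch == null && predicate.test(state)) {
            firstMatch = state;
            done.countDown(); // later updates are ignored, i.e. de-registered
        }
    }

    // Bounded wait, as Node#start() does with its CountDownLatch below;
    // returns null if no matching state arrived within the timeout.
    S awaitFirstMatch(long timeout, TimeUnit unit) throws InterruptedException {
        return done.await(timeout, unit) ? firstMatch : null;
    }
}
// ---------------------------------------------------------------------------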
         TaskResourceTrackingService taskResourceTrackingService = injector.getInstance(TaskResourceTrackingService.class);
         transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService);
+        protobufTransportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService);
         runnableTaskListener.set(taskResourceTrackingService);
         transportService.start();
@@ -1456,14 +1465,18 @@ public Node start() throws NodeValidationException {
         assert clusterService.localNode().equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided";
         transportService.acceptIncomingRequests();
+        // protobufTransportService.acceptIncomingRequests();
         discovery.startInitialJoin();
         final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings());
         configureNodeAndClusterIdStateListener(clusterService);
+        configureProtobufNodeAndClusterIdStateListener(clusterService);
 
         if (initialStateTimeout.millis() > 0) {
             final ThreadPool thread = injector.getInstance(ThreadPool.class);
             ClusterState clusterState = clusterService.state();
+            ProtobufClusterState protobufClusterState = clusterService.protobufState();
             ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext());
+            ProtobufClusterStateObserver protobufObserver = new ProtobufClusterStateObserver(protobufClusterState, clusterService, null, logger, thread.getThreadContext());
 
             if (clusterState.nodes().getClusterManagerNodeId() == null) {
                 logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout);
@@ -1486,6 +1499,33 @@ public void onTimeout(TimeValue timeout) {
                     }
                 }, state -> state.nodes().getClusterManagerNodeId() != null, initialStateTimeout);
+                try {
+                    latch.await();
+                } catch (InterruptedException e) {
+                    throw new OpenSearchTimeoutException("Interrupted while waiting for initial discovery state");
+                }
+            }
+            if (protobufClusterState.nodes().getClusterManagerNodeId() == null) {
+                logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout);
timeout [{}]", initialStateTimeout); + final CountDownLatch latch = new CountDownLatch(1); + protobufObserver.waitForNextChange(new ProtobufClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ProtobufClusterState state) { + latch.countDown(); + } + + @Override + public void onClusterServiceClose() { + latch.countDown(); + } + + @Override + public void onTimeout(TimeValue timeout) { + logger.warn("timed out while waiting for initial discovery state - timeout: {}", initialStateTimeout); + latch.countDown(); + } + }, state -> state.nodes().getClusterManagerNodeId() != null, initialStateTimeout); + try { latch.await(); } catch (InterruptedException e) { @@ -1517,6 +1557,13 @@ protected void configureNodeAndClusterIdStateListener(ClusterService clusterServ ); } + protected void configureProtobufNodeAndClusterIdStateListener(ClusterService clusterService) { + ProtobufNodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId( + clusterService, + injector.getInstance(ThreadPool.class).getThreadContext() + ); + } + private Node stop() { if (!lifecycle.moveToStopped()) { return this; From 0d76b3be3b0c6eb41d34fcc80272ee52b7f6b495 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Fri, 30 Jun 2023 07:09:05 +0000 Subject: [PATCH 11/37] Fixing nodes api response for protobuf Signed-off-by: Vacha Shah --- .../java/org/opensearch/core/index/Index.java | 20 +- .../opensearch/core/index/shard/ShardId.java | 18 +- .../breaker/CircuitBreakerService.java | 10 + .../breaker/NoneCircuitBreakerService.java | 10 + .../org/opensearch/action/ActionModule.java | 40 +- .../cluster/node/info/ProtobufNodeInfo.java | 24 +- .../node/info/ProtobufNodesInfoResponse.java | 116 ++- .../node/info/ProtobufPluginsAndModules.java | 18 + .../ProtobufTransportNodesInfoAction.java | 33 +- .../node/info/TransportNodesInfoAction.java | 3 +- .../cluster/node/stats/ProtobufNodeStats.java | 4 +- .../node/stats/ProtobufNodesStatsRequest.java | 10 +- .../ProtobufNodesStatsRequestBuilder.java | 4 +- .../stats/ProtobufNodesStatsResponse.java | 8 +- .../ProtobufTransportNodesStatsAction.java | 12 +- .../state/ProtobufClusterStateResponse.java | 15 +- .../ProtobufTransportClusterStateAction.java | 5 +- .../admin/indices/stats/CommonStatsFlags.java | 51 +- .../indices/stats/ProtobufCommonStats.java | 74 +- .../stats/ProtobufIndexShardStats.java | 98 +++ .../indices/stats/ProtobufShardStats.java | 178 ++++ .../action/main/ProtobufMainRequest.java | 1 - .../action/main/ProtobufMainResponse.java | 14 +- .../main/ProtobufTransportMainAction.java | 5 +- .../ProtobufHandledTransportAction.java | 5 +- ...obufTransportClusterManagerNodeAction.java | 39 +- .../nodes/ProtobufBaseNodeResponse.java | 10 +- .../nodes/ProtobufBaseNodesRequest.java | 12 +- .../nodes/ProtobufBaseNodesResponse.java | 16 +- .../nodes/ProtobufTransportNodesAction.java | 17 +- .../ClusterManagerNodeChangePredicate.java | 6 +- .../org/opensearch/cluster/ClusterName.java | 15 +- .../org/opensearch/cluster/ClusterState.java | 20 + .../cluster/ProtobufClusterName.java | 76 -- .../cluster/ProtobufClusterState.java | 94 ++- .../cluster/ProtobufClusterStateObserver.java | 11 +- .../cluster/coordination/Coordinator.java | 18 + .../PublicationTransportHandler.java | 8 + .../opensearch/cluster/metadata/Metadata.java | 28 + .../ProtobufIndexNameExpressionResolver.java | 15 +- .../cluster/node/DiscoveryNode.java | 89 +- .../cluster/node/ProtobufDiscoveryNode.java | 537 ------------ .../cluster/node/ProtobufDiscoveryNodes.java | 159 ++-- 
 .../cluster/routing/AllocationId.java | 21 +-
 .../cluster/routing/IndexRoutingTable.java | 14 +
 .../routing/IndexShardRoutingTable.java | 16 +
 .../cluster/routing/RoutingTable.java | 14 +
 .../cluster/routing/ShardRouting.java | 61 +-
 .../cluster/routing/UnassignedInfo.java | 64 +-
 .../cluster/service/ClusterApplier.java | 7 +
 .../service/ClusterApplierService.java | 9 +
 .../common/io/stream/ProtobufStreamInput.java | 2 +-
 ...ProtobufNodeAndClusterIdStateListener.java | 5 +-
 .../common/network/NetworkModule.java | 6 +-
 .../common/settings/SettingsModule.java | 1 -
 .../ProtobufBoundTransportAddress.java | 24 +
 .../transport/ProtobufTransportAddress.java | 28 +-
 .../org/opensearch/discovery/Discovery.java | 5 +
 .../http/AbstractHttpServerTransport.java | 28 +-
 .../opensearch/http/HttpServerTransport.java | 8 +-
 .../org/opensearch/http/ProtobufHttpInfo.java | 41 +
 .../cache/request/ShardRequestCache.java | 4 +
 .../opensearch/index/engine/CommitStats.java | 38 +-
 .../org/opensearch/index/engine/Engine.java | 77 ++
 .../index/fielddata/ShardFieldData.java | 18 +
 .../opensearch/index/get/ShardGetService.java | 10 +
 .../index/search/stats/ShardSearchStats.java | 45 ++
 .../index/seqno/RetentionLease.java | 34 +-
 .../index/seqno/RetentionLeaseStats.java | 28 +-
 .../index/seqno/RetentionLeases.java | 35 +-
 .../opensearch/index/seqno/SeqNoStats.java | 17 +-
 .../opensearch/index/shard/IndexShard.java | 93 +++
 .../index/shard/InternalIndexingStats.java | 25 +
 .../org/opensearch/index/store/Store.java | 8 +
 .../index/warmer/ShardIndexWarmerService.java | 4 +
 .../opensearch/indices/IndicesQueryCache.java | 34 +
 .../opensearch/indices/IndicesService.java | 141 ++++
 .../indices/ProtobufNodeIndicesStats.java | 19 +-
 .../HierarchyCircuitBreakerService.java | 33 +
 .../org/opensearch/ingest/IngestService.java | 31 +-
 .../opensearch/ingest/ProtobufIngestInfo.java | 14 +
 .../ingest/ProtobufProcessorInfo.java | 13 +-
 .../monitor/ProtobufMonitorService.java | 77 ++
 .../org/opensearch/monitor/fs/FsProbe.java | 95 +++
 .../monitor/fs/ProtobufFsService.java | 94 +++
 .../org/opensearch/monitor/jvm/JvmInfo.java | 87 +-
 .../monitor/jvm/ProtobufJvmInfo.java | 571 -------------
 .../monitor/jvm/ProtobufJvmService.java | 61 ++
 .../org/opensearch/monitor/os/OsProbe.java | 109 +++
 .../opensearch/monitor/os/ProtobufOsInfo.java | 37 +
 .../monitor/os/ProtobufOsService.java | 70 ++
 .../monitor/process/ProcessProbe.java | 10 +
 .../monitor/process/ProtobufProcessInfo.java | 21 +
 .../process/ProtobufProcessService.java | 67 ++
 .../main/java/org/opensearch/node/Node.java | 116 +--
 .../node/ProtobufNodeClosedException.java | 4 +-
 .../opensearch/node/ProtobufNodeService.java | 86 +-
 .../node/ProtobufReportingService.java | 3 +-
 .../node/ResponseCollectorService.java | 12 +
 .../plugins/ProtobufPluginInfo.java | 24 +-
 .../org/opensearch/rest/BaseRestHandler.java | 2 +
 .../rest/ProtobufBaseRestHandler.java | 2 +
 .../rest/ProtobufDeprecationRestHandler.java | 105 +++
 .../org/opensearch/rest/RestBaseHandler.java | 8 +-
 .../org/opensearch/rest/RestController.java | 41 +-
 .../action/cat/ProtobufRestNodesAction.java | 10 +-
 .../rest/action/cat/RestNodesAction.java | 1 -
 .../org/opensearch/script/ScriptCache.java | 8 +
 .../org/opensearch/script/ScriptMetrics.java | 13 +
 .../org/opensearch/script/ScriptService.java | 16 +
 .../support/AggregationUsageService.java | 10 +-
 .../support/ProtobufAggregationInfo.java | 18 +
 .../pipeline/ProtobufSearchPipelineInfo.java | 14 +
 .../pipeline/SearchPipelineService.java | 17 +-
 .../org/opensearch/tasks/ProtobufTask.java | 6 +-
 .../ProtobufTaskCancellationService.java | 12 +-
 .../opensearch/tasks/ProtobufTaskManager.java | 763 ------------------
 .../org/opensearch/tasks/TaskManager.java | 42 +-
 .../threadpool/ProtobufThreadPoolInfo.java | 15 +
 .../org/opensearch/threadpool/ThreadPool.java | 1 -
 .../opensearch/transport/InboundHandler.java | 9 +
 .../opensearch/transport/OutboundHandler.java | 9 +
 .../ProtobufClusterConnectionManager.java | 35 +-
 .../ProtobufConnectTransportException.java | 18 +-
 .../transport/ProtobufConnectionManager.java | 18 +-
 .../transport/ProtobufConnectionProfile.java | 9 +-
 .../ProtobufNodeDisconnectedException.java | 4 +-
 .../ProtobufNodeNotConnectedException.java | 4 +-
 .../transport/ProtobufOutboundHandler.java | 4 +-
 .../ProtobufProxyConnectionStrategy.java | 27 +-
 ...tobufReceiveTimeoutTransportException.java | 6 +-
 .../ProtobufRemoteClusterAwareClient.java | 13 +-
 .../ProtobufRemoteClusterAwareRequest.java | 29 -
 .../ProtobufRemoteClusterConnection.java | 14 +-
 .../ProtobufRemoteClusterService.java | 20 +-
 .../ProtobufRemoteConnectionManager.java | 46 +-
 .../ProtobufRemoteConnectionStrategy.java | 4 +-
 .../ProtobufRequestHandlerRegistry.java | 5 +-
 ...ProtobufSendRequestTransportException.java | 6 +-
 .../ProtobufSniffConnectionStrategy.java | 68 +-
 .../transport/ProtobufTransport.java | 281 -------
 .../ProtobufTransportActionProxy.java | 12 +-
 .../ProtobufTransportConnectionListener.java | 6 +-
 .../transport/ProtobufTransportInfo.java | 22 +
 .../ProtobufTransportInterceptor.java | 6 +-
 .../ProtobufTransportMessageListener.java | 4 +-
 .../transport/ProtobufTransportService.java | 134 +--
 .../transport/RemoteClusterService.java | 6 +-
 .../opensearch/transport/TcpTransport.java | 73 +-
 .../org/opensearch/transport/Transport.java | 13 +-
 .../cluster/NodeConnectionsServiceTests.java | 79 ++
 .../coordination/NoOpClusterApplier.java | 6 +
 .../common/network/NetworkModuleTests.java | 12 +
 .../opensearch/rest/RestControllerTests.java | 16 +-
 .../org/opensearch/test/NoopDiscovery.java | 6 +
 .../opensearch/test/MockHttpTransport.java | 24 +
 .../test/transport/FakeTransport.java | 5 +-
 .../test/transport/StubbableTransport.java | 5 +-
 158 files changed, 3757 insertions(+), 2970 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufIndexShardStats.java
 create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufShardStats.java
 delete mode 100644 server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java
 delete mode 100644 server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNode.java
 create mode 100644 server/src/main/java/org/opensearch/monitor/ProtobufMonitorService.java
 create mode 100644 server/src/main/java/org/opensearch/monitor/fs/ProtobufFsService.java
 delete mode 100644 server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java
 create mode 100644 server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmService.java
 create mode 100644 server/src/main/java/org/opensearch/monitor/os/ProtobufOsService.java
 create mode 100644 server/src/main/java/org/opensearch/monitor/process/ProtobufProcessService.java
 create mode 100644 server/src/main/java/org/opensearch/rest/ProtobufDeprecationRestHandler.java
 delete mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java
 delete mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java
 delete mode 100644 server/src/main/java/org/opensearch/transport/ProtobufTransport.java

diff --git a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java
index fc5c5152a5500..7bff2f7d0dc3b 100644
--- a/libs/core/src/main/java/org/opensearch/core/index/Index.java
+++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java
@@ -33,6 +33,7 @@
 package org.opensearch.core.index;
 
 import org.opensearch.core.ParseField;
+import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -42,6 +43,9 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
 
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;
 import java.util.Objects;
@@ -50,7 +54,7 @@
  *
  * @opensearch.internal
  */
-public class Index implements Writeable, ToXContentObject {
+public class Index implements Writeable, ToXContentObject, ProtobufWriteable {
 
     public static final Index[] EMPTY_ARRAY = new Index[0];
     private static final String INDEX_UUID_KEY = "index_uuid";
@@ -77,6 +81,14 @@ public Index(StreamInput in) throws IOException {
         this.uuid = in.readString();
     }
 
+    /**
+     * Read from a stream.
+     */
+    public Index(CodedInputStream in) throws IOException {
+        this.name = in.readString();
+        this.uuid = in.readString();
+    }
+
     public String getName() {
         return this.name;
     }
@@ -122,6 +134,12 @@ public void writeTo(final StreamOutput out) throws IOException {
         out.writeString(uuid);
     }
 
+    @Override
+    public void writeTo(final CodedOutputStream out) throws IOException {
+        out.writeStringNoTag(name);
+        out.writeStringNoTag(uuid);
+    }
+
     @Override
     public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
         builder.startObject();
diff --git a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java
index b01121c3f30d4..e978978f2920c 100644
--- a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java
+++ b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java
@@ -33,6 +33,7 @@
 package org.opensearch.core.index.shard;
 
 import org.opensearch.core.common.Strings;
+import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -40,6 +41,9 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.index.Index;
 
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;
 
 /**
@@ -47,7 +51,7 @@
  *
  * @opensearch.internal
  */
-public class ShardId implements Comparable, ToXContentFragment, Writeable {
+public class ShardId implements Comparable, ToXContentFragment, Writeable, ProtobufWriteable {
 
     private final Index index;
     private final int shardId;
@@ -69,12 +73,24 @@ public ShardId(StreamInput in) throws IOException {
         hashCode = computeHashCode();
     }
 
+    public ShardId(CodedInputStream in) throws IOException {
+        index = new Index(in);
+        shardId = in.readInt32();
+        hashCode = computeHashCode();
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         index.writeTo(out);
         out.writeVInt(shardId);
     }
 
+    @Override
+
public void writeTo(CodedOutputStream out) throws IOException { + index.writeTo(out); + out.writeInt32NoTag(shardId); + } + public Index getIndex() { return index; } diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java index ee9c94f432a36..6069a1a20a23a 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java @@ -63,6 +63,16 @@ protected CircuitBreakerService() {} */ public abstract CircuitBreakerStats stats(String name); + /** + * @return stats about all breakers + */ + public abstract ProtobufAllCircuitBreakerStats protobufStats(); + + /** + * @return stats about a specific breaker + */ + public abstract ProtobufCircuitBreakerStats protobufStats(String name); + @Override protected void doStart() {} diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java index 4095fd32b6d3c..626c4ad5d0100 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java @@ -63,4 +63,14 @@ public CircuitBreakerStats stats(String name) { return new CircuitBreakerStats(CircuitBreaker.FIELDDATA, -1, -1, 0, 0); } + @Override + public ProtobufAllCircuitBreakerStats protobufStats() { + return new ProtobufAllCircuitBreakerStats(new ProtobufCircuitBreakerStats[] { protobufStats(CircuitBreaker.FIELDDATA) }); + } + + @Override + public ProtobufCircuitBreakerStats protobufStats(String name) { + return new ProtobufCircuitBreakerStats(CircuitBreaker.FIELDDATA, -1, -1, 0, 0); + } + } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index e7edc75b4f235..ecf5d1884cf65 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -297,7 +297,6 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.ProtobufIndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.node.ProtobufDiscoveryNodes; import org.opensearch.common.NamedRegistry; import org.opensearch.common.inject.AbstractModule; import org.opensearch.common.inject.TypeLiteral; @@ -558,9 +557,11 @@ public ActionModule( this.settingsFilter = settingsFilter; this.actionPlugins = actionPlugins; this.protobufIndexNameExpressionResolver = null; - this.protobufActionPlugins = new ArrayList<>();; + this.protobufActionPlugins = new ArrayList<>(); + ; this.protobufActions = new HashMap>(); - this.protobufActionFilters = setupProtobufActionFilters(this.protobufActionPlugins);; + this.protobufActionFilters = setupProtobufActionFilters(this.protobufActionPlugins); + ; this.threadPool = threadPool; this.extensionsManager = extensionsManager; actions = setupActions(actionPlugins); @@ -660,7 +661,15 @@ public ActionModule( actionPlugins.stream().flatMap(p -> p.indicesAliasesRequestValidators().stream()).collect(Collectors.toList()) ); - restController = new RestController(headers, restWrapper, nodeClient, protobufRestWrapper, protobufNodeClient, circuitBreakerService, 
usageService); + restController = new RestController( + headers, + restWrapper, + nodeClient, + protobufRestWrapper, + protobufNodeClient, + circuitBreakerService, + usageService + ); } public Map> getActions() { @@ -1125,17 +1134,17 @@ public void initProtobufRestHandlers() { registerHandler.accept(new ProtobufRestNodesAction()); // for (ActionPlugin plugin : actionPlugins) { - // for (ProtobufActionPlugin handler : plugin.getRestHandlers( - // settings, - // restController, - // clusterSettings, - // indexScopedSettings, - // settingsFilter, - // indexNameExpressionResolver, - // nodesInCluster - // )) { - // registerHandler.accept(handler); - // } + // for (ProtobufActionPlugin handler : plugin.getRestHandlers( + // settings, + // restController, + // clusterSettings, + // indexScopedSettings, + // settingsFilter, + // indexNameExpressionResolver, + // nodesInCluster + // )) { + // registerHandler.accept(handler); + // } // } registerHandler.accept(new ProtobufRestCatAction(catActions)); } @@ -1192,7 +1201,6 @@ protected void configure() { // register dynamic ActionType -> transportAction Map used by NodeClient bind(ProtobufDynamicActionRegistry.class).toInstance(protobufDynamicActionRegistry); - } public ActionFilters getActionFilters() { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java index df059711196cd..d8184bfa0a46d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java @@ -18,7 +18,7 @@ import org.opensearch.Build; import org.opensearch.Version; import org.opensearch.action.support.nodes.ProtobufBaseNodeResponse; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; @@ -27,7 +27,7 @@ import org.opensearch.http.ProtobufHttpInfo; import org.opensearch.ingest.ProtobufIngestInfo; import org.opensearch.monitor.jvm.JvmInfo; -import org.opensearch.monitor.jvm.ProtobufJvmInfo; +import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.monitor.os.OsInfo; import org.opensearch.monitor.os.ProtobufOsInfo; import org.opensearch.monitor.process.ProtobufProcessInfo; @@ -79,7 +79,7 @@ public ProtobufNodeInfo(CodedInputStream in) throws IOException { } addInfoIfNonNull(ProtobufOsInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufOsInfo::new)); addInfoIfNonNull(ProtobufProcessInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufProcessInfo::new)); - addInfoIfNonNull(ProtobufJvmInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufJvmInfo::new)); + addInfoIfNonNull(JvmInfo.class, protobufStreamInput.readOptionalWriteable(JvmInfo::new)); addInfoIfNonNull(ProtobufThreadPoolInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufThreadPoolInfo::new)); addInfoIfNonNull(ProtobufTransportInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufTransportInfo::new)); addInfoIfNonNull(ProtobufHttpInfo.class, protobufStreamInput.readOptionalWriteable(ProtobufHttpInfo::new)); @@ -94,11 +94,11 @@ public ProtobufNodeInfo(CodedInputStream in) throws IOException { public ProtobufNodeInfo( Version version, Build build, - ProtobufDiscoveryNode node, + DiscoveryNode node, 
@Nullable Settings settings, @Nullable ProtobufOsInfo os, @Nullable ProtobufProcessInfo process, - @Nullable ProtobufJvmInfo jvm, + @Nullable JvmInfo jvm, @Nullable ProtobufThreadPoolInfo threadPool, @Nullable ProtobufTransportInfo transport, @Nullable ProtobufHttpInfo http, @@ -114,7 +114,7 @@ public ProtobufNodeInfo( this.settings = settings; addInfoIfNonNull(ProtobufOsInfo.class, os); addInfoIfNonNull(ProtobufProcessInfo.class, process); - addInfoIfNonNull(ProtobufJvmInfo.class, jvm); + addInfoIfNonNull(JvmInfo.class, jvm); addInfoIfNonNull(ProtobufThreadPoolInfo.class, threadPool); addInfoIfNonNull(ProtobufTransportInfo.class, transport); addInfoIfNonNull(ProtobufHttpInfo.class, http); @@ -204,7 +204,7 @@ public void writeTo(CodedOutputStream out) throws IOException { } protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufOsInfo.class)); protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufProcessInfo.class)); - protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufJvmInfo.class)); + protobufStreamOutput.writeOptionalWriteable(getInfo(JvmInfo.class)); protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufThreadPoolInfo.class)); protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufTransportInfo.class)); protobufStreamOutput.writeOptionalWriteable(getInfo(ProtobufHttpInfo.class)); @@ -216,7 +216,7 @@ public void writeTo(CodedOutputStream out) throws IOException { } } - public static ProtobufNodeInfo.Builder builder(Version version, Build build, ProtobufDiscoveryNode node) { + public static ProtobufNodeInfo.Builder builder(Version version, Build build, DiscoveryNode node) { return new Builder(version, build, node); } @@ -226,9 +226,9 @@ public static ProtobufNodeInfo.Builder builder(Version version, Build build, Pro public static class Builder { private final Version version; private final Build build; - private final ProtobufDiscoveryNode node; + private final DiscoveryNode node; - private Builder(Version version, Build build, ProtobufDiscoveryNode node) { + private Builder(Version version, Build build, DiscoveryNode node) { this.version = version; this.build = build; this.node = node; @@ -237,7 +237,7 @@ private Builder(Version version, Build build, ProtobufDiscoveryNode node) { private Settings settings; private ProtobufOsInfo os; private ProtobufProcessInfo process; - private ProtobufJvmInfo jvm; + private JvmInfo jvm; private ProtobufThreadPoolInfo threadPool; private ProtobufTransportInfo transport; private ProtobufHttpInfo http; @@ -262,7 +262,7 @@ public Builder setProcess(ProtobufProcessInfo process) { return this; } - public Builder setJvm(ProtobufJvmInfo jvm) { + public Builder setJvm(JvmInfo jvm) { this.jvm = jvm; return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java index 2f82367e0c9e7..7b46d535c5224 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java @@ -36,29 +36,42 @@ import com.google.protobuf.CodedOutputStream; import org.opensearch.action.ProtobufFailedNodeException; import org.opensearch.action.support.nodes.ProtobufBaseNodesResponse; -import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import 
org.opensearch.common.Strings; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.ToXContent.Params; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.http.ProtobufHttpInfo; +import org.opensearch.ingest.ProtobufIngestInfo; +import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.monitor.os.ProtobufOsInfo; +import org.opensearch.monitor.process.ProtobufProcessInfo; +import org.opensearch.search.aggregations.support.ProtobufAggregationInfo; +import org.opensearch.search.pipeline.ProtobufSearchPipelineInfo; +import org.opensearch.threadpool.ProtobufThreadPoolInfo; +import org.opensearch.transport.ProtobufTransportInfo; import java.io.IOException; import java.util.List; +import java.util.Map; /** * Transport response for OpenSearch Node Information * * @opensearch.internal */ -public class ProtobufNodesInfoResponse extends ProtobufBaseNodesResponse { +public class ProtobufNodesInfoResponse extends ProtobufBaseNodesResponse implements ToXContentFragment { public ProtobufNodesInfoResponse(CodedInputStream in) throws IOException { super(in); } - public ProtobufNodesInfoResponse( - ProtobufClusterName clusterName, - List nodes, - List failures - ) { + public ProtobufNodesInfoResponse(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); } @@ -73,4 +86,93 @@ protected void writeNodesTo(CodedOutputStream out, List nodes) ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); protobufStreamOutput.writeCollection(nodes, (o, v) -> v.writeTo(o)); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("nodes"); + for (ProtobufNodeInfo nodeInfo : getNodes()) { + builder.startObject(nodeInfo.getNode().getId()); + + builder.field("name", nodeInfo.getNode().getName()); + builder.field("transport_address", nodeInfo.getNode().getAddress().toString()); + builder.field("host", nodeInfo.getNode().getHostName()); + builder.field("ip", nodeInfo.getNode().getHostAddress()); + + builder.field("version", nodeInfo.getVersion()); + builder.field("build_type", nodeInfo.getBuild().type().displayName()); + builder.field("build_hash", nodeInfo.getBuild().hash()); + if (nodeInfo.getTotalIndexingBuffer() != null) { + builder.humanReadableField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer()); + } + + builder.startArray("roles"); + for (DiscoveryNodeRole role : nodeInfo.getNode().getRoles()) { + builder.value(role.roleName()); + } + builder.endArray(); + + if (!nodeInfo.getNode().getAttributes().isEmpty()) { + builder.startObject("attributes"); + for (Map.Entry entry : nodeInfo.getNode().getAttributes().entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + } + + if (nodeInfo.getSettings() != null) { + builder.startObject("settings"); + Settings settings = nodeInfo.getSettings(); + settings.toXContent(builder, params); + builder.endObject(); + } + + if (nodeInfo.getInfo(ProtobufOsInfo.class) != null) { + nodeInfo.getInfo(ProtobufOsInfo.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufProcessInfo.class) != null) { + nodeInfo.getInfo(ProtobufProcessInfo.class).toXContent(builder, params); 
+ } + if (nodeInfo.getInfo(JvmInfo.class) != null) { + nodeInfo.getInfo(JvmInfo.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufThreadPoolInfo.class) != null) { + nodeInfo.getInfo(ProtobufThreadPoolInfo.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufTransportInfo.class) != null) { + nodeInfo.getInfo(ProtobufTransportInfo.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufHttpInfo.class) != null) { + nodeInfo.getInfo(ProtobufHttpInfo.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufPluginsAndModules.class) != null) { + nodeInfo.getInfo(ProtobufPluginsAndModules.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufIngestInfo.class) != null) { + nodeInfo.getInfo(ProtobufIngestInfo.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufAggregationInfo.class) != null) { + nodeInfo.getInfo(ProtobufAggregationInfo.class).toXContent(builder, params); + } + if (nodeInfo.getInfo(ProtobufSearchPipelineInfo.class) != null) { + nodeInfo.getInfo(ProtobufSearchPipelineInfo.class).toXContent(builder, params); + } + + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return Strings.toString(builder); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java index c1117c4321544..8c9e574170aaa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufPluginsAndModules.java @@ -13,6 +13,7 @@ import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ProtobufReportingService; import org.opensearch.plugins.ProtobufPluginInfo; @@ -74,4 +75,21 @@ public void addPlugin(ProtobufPluginInfo info) { public void addModule(ProtobufPluginInfo info) { modules.add(info); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("plugins"); + for (ProtobufPluginInfo pluginInfo : getPluginInfos()) { + pluginInfo.toXContent(builder, params); + } + builder.endArray(); + // TODO: not ideal, make a better api for this (e.g. 
with jar metadata, and so on) + builder.startArray("modules"); + for (ProtobufPluginInfo moduleInfo : getModuleInfos()) { + moduleInfo.toXContent(builder, params); + } + builder.endArray(); + + return builder; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java index 223520853fafe..e00dd900ffc35 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java @@ -13,11 +13,9 @@ import org.opensearch.action.ProtobufFailedNodeException; import org.opensearch.action.support.ProtobufActionFilters; import org.opensearch.action.support.nodes.ProtobufTransportNodesAction; -import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.ProtobufNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ProtobufTransportRequest; @@ -68,7 +66,7 @@ protected ProtobufNodesInfoResponse newResponse( List responses, List failures ) { - return new ProtobufNodesInfoResponse(new ProtobufClusterName(clusterService.getClusterName().value()), responses, failures); + return new ProtobufNodesInfoResponse(new ClusterName(clusterService.getClusterName().value()), responses, failures); } @Override @@ -85,20 +83,21 @@ protected ProtobufNodeInfo newNodeResponse(CodedInputStream in) throws IOExcepti protected ProtobufNodeInfo nodeOperation(NodeInfoRequest nodeRequest) { ProtobufNodesInfoRequest request = nodeRequest.request; Set metrics = request.requestedMetrics(); - return nodeService.info( - metrics.contains(NodesInfoRequest.Metric.SETTINGS.metricName()), - metrics.contains(NodesInfoRequest.Metric.OS.metricName()), - metrics.contains(NodesInfoRequest.Metric.PROCESS.metricName()), - metrics.contains(NodesInfoRequest.Metric.JVM.metricName()), - metrics.contains(NodesInfoRequest.Metric.THREAD_POOL.metricName()), - metrics.contains(NodesInfoRequest.Metric.TRANSPORT.metricName()), - metrics.contains(NodesInfoRequest.Metric.HTTP.metricName()), - metrics.contains(NodesInfoRequest.Metric.PLUGINS.metricName()), - metrics.contains(NodesInfoRequest.Metric.INGEST.metricName()), - metrics.contains(NodesInfoRequest.Metric.AGGREGATIONS.metricName()), - metrics.contains(NodesInfoRequest.Metric.INDICES.metricName()), - metrics.contains(NodesInfoRequest.Metric.SEARCH_PIPELINES.metricName()) + ProtobufNodeInfo protobufNodeInfo = nodeService.info( + metrics.contains(ProtobufNodesInfoRequest.Metric.SETTINGS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.OS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.PROCESS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.JVM.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.THREAD_POOL.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.TRANSPORT.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.HTTP.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.PLUGINS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.INGEST.metricName()), + 
metrics.contains(ProtobufNodesInfoRequest.Metric.AGGREGATIONS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.INDICES.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.SEARCH_PIPELINES.metricName()) ); + return protobufNodeInfo; } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 2c4f8522a5a5c..64cd1a887ac12 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -106,7 +106,7 @@ protected NodeInfo newNodeResponse(StreamInput in) throws IOException { protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { NodesInfoRequest request = nodeRequest.request; Set metrics = request.requestedMetrics(); - return nodeService.info( + NodeInfo nodeInfo = nodeService.info( metrics.contains(NodesInfoRequest.Metric.SETTINGS.metricName()), metrics.contains(NodesInfoRequest.Metric.OS.metricName()), metrics.contains(NodesInfoRequest.Metric.PROCESS.metricName()), @@ -120,6 +120,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { metrics.contains(NodesInfoRequest.Metric.INDICES.metricName()), metrics.contains(NodesInfoRequest.Metric.SEARCH_PIPELINES.metricName()) ); + return nodeInfo; } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java index 8a96eb41bb1b7..5c5a13a4e5e65 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java @@ -11,7 +11,7 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; import org.opensearch.action.support.nodes.ProtobufBaseNodeResponse; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.ProtobufStreamInput; @@ -105,7 +105,7 @@ public ProtobufNodeStats(CodedInputStream in) throws IOException { } public ProtobufNodeStats( - ProtobufDiscoveryNode node, + DiscoveryNode node, long timestamp, @Nullable ProtobufNodeIndicesStats indices, @Nullable ProtobufOsStats os, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java index c61deaf87c413..19b7f907932c7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java @@ -10,7 +10,7 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; -import org.opensearch.action.admin.indices.stats.ProtobufCommonStatsFlags; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.ProtobufBaseNodesRequest; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; @@ -29,7 +29,7 @@ */ public class 
ProtobufNodesStatsRequest extends ProtobufBaseNodesRequest { - private ProtobufCommonStatsFlags indices = new ProtobufCommonStatsFlags(); + private CommonStatsFlags indices = new CommonStatsFlags(); private final Set requestedMetrics = new HashSet<>(); public ProtobufNodesStatsRequest() { @@ -39,7 +39,7 @@ public ProtobufNodesStatsRequest() { public ProtobufNodesStatsRequest(CodedInputStream in) throws IOException { super(in); - indices = new ProtobufCommonStatsFlags(in); + indices = new CommonStatsFlags(in); requestedMetrics.clear(); ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); requestedMetrics.addAll(protobufStreamInput.readList(CodedInputStream::readString)); @@ -76,7 +76,7 @@ public ProtobufNodesStatsRequest clear() { * may not have submetrics. * @return flags indicating which indices stats to return */ - public ProtobufCommonStatsFlags indices() { + public CommonStatsFlags indices() { return indices; } @@ -86,7 +86,7 @@ public ProtobufCommonStatsFlags indices() { * @param indices flags indicating which indices stats to return * @return This object, for request chaining. */ - public ProtobufNodesStatsRequest indices(ProtobufCommonStatsFlags indices) { + public ProtobufNodesStatsRequest indices(CommonStatsFlags indices) { this.indices = indices; return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java index 3c6d14ad478d5..659a4aa7da185 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequestBuilder.java @@ -8,7 +8,7 @@ package org.opensearch.action.admin.cluster.node.stats; -import org.opensearch.action.admin.indices.stats.ProtobufCommonStatsFlags; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.ProtobufNodesOperationRequestBuilder; import org.opensearch.client.ProtobufOpenSearchClient; @@ -75,7 +75,7 @@ public ProtobufNodesStatsRequestBuilder setIndices(boolean indices) { /** * Should the node indices stats be returned. 
*/ - public ProtobufNodesStatsRequestBuilder setIndices(ProtobufCommonStatsFlags indices) { + public ProtobufNodesStatsRequestBuilder setIndices(CommonStatsFlags indices) { request.indices(indices); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java index 248608203dc79..ef18285256f39 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java @@ -12,7 +12,7 @@ import com.google.protobuf.CodedOutputStream; import org.opensearch.action.ProtobufFailedNodeException; import org.opensearch.action.support.nodes.ProtobufBaseNodesResponse; -import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.ClusterName; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; @@ -34,11 +34,7 @@ public ProtobufNodesStatsResponse(CodedInputStream in) throws IOException { super(in); } - public ProtobufNodesStatsResponse( - ProtobufClusterName clusterName, - List nodes, - List failures - ) { + public ProtobufNodesStatsResponse(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java index b12bd7ac0a4d1..ed7f8e4002dc0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java @@ -13,11 +13,9 @@ import org.opensearch.action.ProtobufFailedNodeException; import org.opensearch.action.support.ProtobufActionFilters; import org.opensearch.action.support.nodes.ProtobufTransportNodesAction; -import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.ProtobufNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ProtobufTransportRequest; @@ -63,8 +61,12 @@ public ProtobufTransportNodesStatsAction( } @Override - protected ProtobufNodesStatsResponse newResponse(ProtobufNodesStatsRequest request, List responses, List failures) { - return new ProtobufNodesStatsResponse(new ProtobufClusterName(clusterService.getClusterName().value()), responses, failures); + protected ProtobufNodesStatsResponse newResponse( + ProtobufNodesStatsRequest request, + List responses, + List failures + ) { + return new ProtobufNodesStatsResponse(new ClusterName(clusterService.getClusterName().value()), responses, failures); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java index 60892290c94a5..af24e5dc13b2c 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java @@ -17,7 +17,7 @@ import com.google.protobuf.CodedOutputStream; import org.opensearch.action.ProtobufActionResponse; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.ProtobufClusterName; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ProtobufClusterState; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; @@ -31,19 +31,19 @@ */ public class ProtobufClusterStateResponse extends ProtobufActionResponse { - private ProtobufClusterName clusterName; + private ClusterName clusterName; private ProtobufClusterState clusterState; private boolean waitForTimedOut = false; public ProtobufClusterStateResponse(CodedInputStream in) throws IOException { super(in); ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); - clusterName = new ProtobufClusterName(in); + clusterName = new ClusterName(in); clusterState = protobufStreamInput.readOptionalWriteable(innerIn -> ProtobufClusterState.readFrom(innerIn, null)); waitForTimedOut = in.readBool(); } - public ProtobufClusterStateResponse(ProtobufClusterName clusterName, ProtobufClusterState clusterState, boolean waitForTimedOut) { + public ProtobufClusterStateResponse(ClusterName clusterName, ProtobufClusterState clusterState, boolean waitForTimedOut) { this.clusterName = clusterName; this.clusterState = clusterState; this.waitForTimedOut = waitForTimedOut; @@ -60,7 +60,7 @@ public ProtobufClusterState getState() { /** * The name of the cluster. */ - public ProtobufClusterName getClusterName() { + public ClusterName getClusterName() { return this.clusterName; } @@ -79,4 +79,9 @@ public void writeTo(CodedOutputStream out) throws IOException { protobufStreamOutput.writeOptionalWriteable(clusterState); out.writeBoolNoTag(waitForTimedOut); } + + @Override + public String toString() { + return "ClusterStateResponse{" + "clusterState=" + clusterState + '}'; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java index b8a663c409904..117dd3032331e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java @@ -26,7 +26,6 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; import org.opensearch.node.NodeClosedException; import org.opensearch.threadpool.ThreadPool; @@ -41,7 +40,9 @@ * * @opensearch.internal */ -public class ProtobufTransportClusterStateAction extends ProtobufTransportClusterManagerNodeReadAction { +public class ProtobufTransportClusterStateAction extends ProtobufTransportClusterManagerNodeReadAction< + ProtobufClusterStateRequest, + ProtobufClusterStateResponse> { private final Logger logger = LogManager.getLogger(getClass()); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java 
b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 502b90417615f..75e660b4b9e3c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -37,6 +37,13 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.Strings; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; import java.io.IOException; import java.util.Collections; @@ -47,7 +54,7 @@ * * @opensearch.internal */ -public class CommonStatsFlags implements Writeable, Cloneable { +public class CommonStatsFlags implements Writeable, ProtobufWriteable, Cloneable { public static final CommonStatsFlags ALL = new CommonStatsFlags().all(); public static final CommonStatsFlags NONE = new CommonStatsFlags().clear(); @@ -111,6 +118,48 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeOnlyTopIndexingPressureMetrics); } + public CommonStatsFlags(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + final long longFlags = in.readInt64(); + flags.clear(); + for (Flag flag : Flag.values()) { + if ((longFlags & (1 << flag.getIndex())) != 0) { + flags.add(flag); + } + } + if (protobufStreamInput.getVersion().before(Version.V_2_0_0)) { + protobufStreamInput.readStringArray(); + } + groups = protobufStreamInput.readStringArray(); + fieldDataFields = protobufStreamInput.readStringArray(); + completionDataFields = protobufStreamInput.readStringArray(); + includeSegmentFileSizes = in.readBool(); + includeUnloadedSegments = in.readBool(); + includeAllShardIndexingPressureTrackers = in.readBool(); + includeOnlyTopIndexingPressureMetrics = in.readBool(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + long longFlags = 0; + for (Flag flag : flags) { + longFlags |= (1 << flag.getIndex()); + } + out.writeInt64NoTag(longFlags); + + if (protobufStreamOutput.getVersion().before(Version.V_2_0_0)) { + protobufStreamOutput.writeStringArrayNullable(Strings.EMPTY_ARRAY); + } + protobufStreamOutput.writeStringArrayNullable(groups); + protobufStreamOutput.writeStringArrayNullable(fieldDataFields); + protobufStreamOutput.writeStringArrayNullable(completionDataFields); + out.writeBoolNoTag(includeSegmentFileSizes); + out.writeBoolNoTag(includeUnloadedSegments); + out.writeBoolNoTag(includeAllShardIndexingPressureTrackers); + out.writeBoolNoTag(includeOnlyTopIndexingPressureMetrics); + } + /** * Sets all flags to return all stats. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java
index 89200608328dc..de1331c7f21cb 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufCommonStats.java
@@ -10,6 +10,8 @@

 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
+
+import org.apache.lucene.store.AlreadyClosedException;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.io.stream.ProtobufStreamInput;
 import org.opensearch.common.io.stream.ProtobufStreamOutput;
@@ -28,11 +30,13 @@
 import org.opensearch.index.recovery.ProtobufRecoveryStats;
 import org.opensearch.index.refresh.ProtobufRefreshStats;
 import org.opensearch.index.search.stats.ProtobufSearchStats;
+import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.ProtobufDocsStats;
 import org.opensearch.index.shard.ProtobufIndexingStats;
 import org.opensearch.index.store.ProtobufStoreStats;
 import org.opensearch.index.translog.ProtobufTranslogStats;
 import org.opensearch.index.warmer.ProtobufWarmerStats;
+import org.opensearch.indices.IndicesQueryCache;
 import org.opensearch.search.suggest.completion.ProtobufCompletionStats;

 import java.io.IOException;
@@ -96,13 +100,13 @@ public class ProtobufCommonStats implements ProtobufWriteable, ToXContentFragmen
     public ProtobufRecoveryStats recoveryStats;

     public ProtobufCommonStats() {
-        this(ProtobufCommonStatsFlags.NONE);
+        this(CommonStatsFlags.NONE);
     }

-    public ProtobufCommonStats(ProtobufCommonStatsFlags flags) {
-        ProtobufCommonStatsFlags.Flag[] setFlags = flags.getFlags();
+    public ProtobufCommonStats(CommonStatsFlags flags) {
+        CommonStatsFlags.Flag[] setFlags = flags.getFlags();

-        for (ProtobufCommonStatsFlags.Flag flag : setFlags) {
+        for (CommonStatsFlags.Flag flag : setFlags) {
             switch (flag) {
                 case Docs:
                     docs = new ProtobufDocsStats();
@@ -158,6 +162,68 @@ public ProtobufCommonStats(CommonStatsFlags flags) {
         }
     }

+    public ProtobufCommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {
+        CommonStatsFlags.Flag[] setFlags = flags.getFlags();
+        for (CommonStatsFlags.Flag flag : setFlags) {
+            try {
+                switch (flag) {
+                    case Docs:
+                        docs = indexShard.protobufDocsStats();
+                        break;
+                    case Store:
+                        store = indexShard.protobufStoreStats();
+                        break;
+                    case Indexing:
+                        indexing = indexShard.protobufIndexingStats();
+                        break;
+                    case Get:
+                        get = indexShard.getProtobufStats();
+                        break;
+                    case Search:
+                        search = indexShard.protobufSearchStats(flags.groups());
+                        break;
+                    case Merge:
+                        merge = indexShard.protobufMergeStats();
+                        break;
+                    case Refresh:
+                        refresh = indexShard.protobufRefreshStats();
+                        break;
+                    case Flush:
+                        flush = indexShard.protobufFlushStats();
+                        break;
+                    case Warmer:
+                        warmer = indexShard.protobufWarmerStats();
+                        break;
+                    case QueryCache:
+                        queryCache = indicesQueryCache.getProtobufStats(indexShard.shardId());
+                        break;
+                    case FieldData:
+                        fieldData = indexShard.protobufFieldDataStats(flags.fieldDataFields());
+                        break;
+                    // case Completion:
+                    //     completion = indexShard.completionStats(flags.completionDataFields());
+                    //     break;
+                    case Segments:
+                        segments = indexShard.protobufSegmentStats(flags.includeSegmentFileSizes(), flags.includeUnloadedSegments());
+                        break;
+                    // case Translog:
+                    //     translog = indexShard.translogStats();
+                    //     break;
+                    case RequestCache:
+                        requestCache = indexShard.requestCache().protobufStats();
+                        break;
+                    case Recovery:
+                        recoveryStats = indexShard.protobufRecoveryStats();
+                        break;
+                    default:
+                        throw new IllegalStateException("Unknown Flag: " + flag);
+                }
+            } catch (AlreadyClosedException e) {
+                // shard is closed - no stats is fine
+            }
+        }
+    }
+
     public ProtobufCommonStats(CodedInputStream in) throws IOException {
         ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
         docs = protobufStreamInput.readOptionalWriteable(ProtobufDocsStats::new);
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufIndexShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufIndexShardStats.java
new file mode 100644
index 0000000000000..849061e865d71
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufIndexShardStats.java
@@ -0,0 +1,98 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.stats;
+
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.index.shard.ShardId;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+
+/**
+ * ProtobufIndexShardStats for OpenSearch
+ *
+ * @opensearch.internal
+ */
+public class ProtobufIndexShardStats implements Iterable<ProtobufShardStats>, ProtobufWriteable {
+
+    private final ShardId shardId;
+
+    private final ProtobufShardStats[] shards;
+
+    public ProtobufIndexShardStats(CodedInputStream in) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
+        shardId = new ShardId(in);
+        shards = protobufStreamInput.readArray(ProtobufShardStats::new, ProtobufShardStats[]::new);
+    }
+
+    public ProtobufIndexShardStats(ShardId shardId, ProtobufShardStats[] shards) {
+        this.shardId = shardId;
+        this.shards = shards;
+    }
+
+    public ShardId getShardId() {
+        return this.shardId;
+    }
+
+    public ProtobufShardStats[] getShards() {
+        return shards;
+    }
+
+    public ProtobufShardStats getAt(int position) {
+        return shards[position];
+    }
+
+    @Override
+    public Iterator<ProtobufShardStats> iterator() {
+        return Arrays.stream(shards).iterator();
+    }
+
+    private ProtobufCommonStats total = null;
+
+    public ProtobufCommonStats getTotal() {
+        if (total != null) {
+            return total;
+        }
+        ProtobufCommonStats stats = new ProtobufCommonStats();
+        for (ProtobufShardStats shard : shards) {
+            stats.add(shard.getStats());
+        }
+        total = stats;
+        return stats;
+    }
+
+    private ProtobufCommonStats primary = null;
+
+    public ProtobufCommonStats getPrimary() {
+        if (primary != null) {
+            return primary;
+        }
+        ProtobufCommonStats stats = new ProtobufCommonStats();
+        for (ProtobufShardStats shard : shards) {
+            if (shard.getShardRouting().primary()) {
+                stats.add(shard.getStats());
+            }
+        }
+        primary = stats;
+        return stats;
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out);
+        shardId.writeTo(out);
+        protobufStreamOutput.writeArray((o, v) -> v.writeTo(o), shards);
+    }
+}
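Note: getTotal() and getPrimary() above compute their aggregate once and memoize it, so repeated calls return the cached instance. Illustrative usage (the helper below is not part of this patch):

    // Illustrative helper (not part of this patch).
    static ProtobufCommonStats totals(ProtobufIndexShardStats stats) {
        ProtobufCommonStats total = stats.getTotal();   // aggregates all shard copies once
        assert stats.getTotal() == total;               // subsequent calls hit the cache
        return total;
    }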
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufShardStats.java
new file mode 100644
index 0000000000000..514c4195f1b1d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ProtobufShardStats.java
@@ -0,0 +1,178 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.stats;
+
+import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.common.Nullable;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.engine.CommitStats;
+import org.opensearch.index.seqno.RetentionLeaseStats;
+import org.opensearch.index.seqno.SeqNoStats;
+import org.opensearch.index.shard.ShardPath;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.IOException;
+
+/**
+ * Shard Stats for OpenSearch
+ *
+ * @opensearch.internal
+ */
+public class ProtobufShardStats implements ProtobufWriteable, ToXContentFragment {
+
+    private ShardRouting shardRouting;
+    private ProtobufCommonStats commonStats;
+    @Nullable
+    private CommitStats commitStats;
+    @Nullable
+    private SeqNoStats seqNoStats;
+
+    @Nullable
+    private RetentionLeaseStats retentionLeaseStats;
+
+    /**
+     * Gets the current retention lease stats.
+     *
+     * @return the current retention lease stats
+     */
+    public RetentionLeaseStats getRetentionLeaseStats() {
+        return retentionLeaseStats;
+    }
+
+    private String dataPath;
+    private String statePath;
+    private boolean isCustomDataPath;
+
+    public ProtobufShardStats(CodedInputStream in) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
+        shardRouting = new ShardRouting(in);
+        commonStats = new ProtobufCommonStats(in);
+        commitStats = CommitStats.readOptionalCommitStatsFromProtobuf(in);
+        statePath = in.readString();
+        dataPath = in.readString();
+        isCustomDataPath = in.readBool();
+        seqNoStats = protobufStreamInput.readOptionalWriteable(SeqNoStats::new);
+        retentionLeaseStats = protobufStreamInput.readOptionalWriteable(RetentionLeaseStats::new);
+    }
+
+    public ProtobufShardStats(
+        final ShardRouting routing,
+        final ShardPath shardPath,
+        final ProtobufCommonStats commonStats,
+        final CommitStats commitStats,
+        final SeqNoStats seqNoStats,
+        final RetentionLeaseStats retentionLeaseStats
+    ) {
+        this.shardRouting = routing;
+        this.dataPath = shardPath.getRootDataPath().toString();
+        this.statePath = shardPath.getRootStatePath().toString();
+        this.isCustomDataPath = shardPath.isCustomDataPath();
+        this.commitStats = commitStats;
+        this.commonStats = commonStats;
+        this.seqNoStats = seqNoStats;
+        this.retentionLeaseStats = retentionLeaseStats;
+    }
+
+    /**
+     * The shard routing information (cluster wide shard state).
+     */
+    public ShardRouting getShardRouting() {
+        return this.shardRouting;
+    }
+
+    public ProtobufCommonStats getStats() {
+        return this.commonStats;
+    }
+
+    @Nullable
+    public CommitStats getCommitStats() {
+        return this.commitStats;
+    }
+
+    @Nullable
+    public SeqNoStats getSeqNoStats() {
+        return this.seqNoStats;
+    }
+
+    public String getDataPath() {
+        return dataPath;
+    }
+
+    public String getStatePath() {
+        return statePath;
+    }
+
+    public boolean isCustomDataPath() {
+        return isCustomDataPath;
+    }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out);
+        shardRouting.writeTo(out);
+        commonStats.writeTo(out);
+        protobufStreamOutput.writeOptionalWriteable(commitStats);
+        out.writeStringNoTag(statePath);
+        out.writeStringNoTag(dataPath);
+        out.writeBoolNoTag(isCustomDataPath);
+        protobufStreamOutput.writeOptionalWriteable(seqNoStats);
+        protobufStreamOutput.writeOptionalWriteable(retentionLeaseStats);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(Fields.ROUTING)
+            .field(Fields.STATE, shardRouting.state())
+            .field(Fields.PRIMARY, shardRouting.primary())
+            .field(Fields.NODE, shardRouting.currentNodeId())
+            .field(Fields.RELOCATING_NODE, shardRouting.relocatingNodeId())
+            .endObject();
+
+        commonStats.toXContent(builder, params);
+        if (commitStats != null) {
+            commitStats.toXContent(builder, params);
+        }
+        if (seqNoStats != null) {
+            seqNoStats.toXContent(builder, params);
+        }
+        if (retentionLeaseStats != null) {
+            retentionLeaseStats.toXContent(builder, params);
+        }
+        builder.startObject(Fields.SHARD_PATH);
+        builder.field(Fields.STATE_PATH, statePath);
+        builder.field(Fields.DATA_PATH, dataPath);
+        builder.field(Fields.IS_CUSTOM_DATA_PATH, isCustomDataPath);
+        builder.endObject();
+        return builder;
+    }
+
+    /**
+     * Fields for parsing and toXContent
+     *
+     * @opensearch.internal
+     */
+    static final class Fields {
+        static final String ROUTING = "routing";
+        static final String STATE = "state";
+        static final String STATE_PATH = "state_path";
+        static final String DATA_PATH = "data_path";
+        static final String IS_CUSTOM_DATA_PATH = "is_custom_data_path";
+        static final String SHARD_PATH = "shard_path";
+        static final String PRIMARY = "primary";
+        static final String NODE = "node";
+        static final String RELOCATING_NODE = "relocating_node";
+    }
+
+}
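Note: writeTo above leans on ProtobufStreamOutput.writeOptionalWriteable for the nullable stats. Its implementation is not shown in this excerpt; the assumed shape, mirroring StreamOutput#writeOptionalWriteable's boolean presence marker, would be roughly:

    // Assumed shape of ProtobufStreamOutput#writeOptionalWriteable (not shown in this patch).
    public void writeOptionalWriteable(ProtobufWriteable writeable) throws IOException {
        if (writeable != null) {
            out.writeBoolNoTag(true);     // presence marker
            writeable.writeTo(out);       // payload
        } else {
            out.writeBoolNoTag(false);    // absent: the reader skips the payload entirely
        }
    }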
diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java
index 3fb3e35d679f0..b352a5c3ae55d 100644
--- a/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java
+++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java
@@ -11,7 +11,6 @@
 import com.google.protobuf.CodedInputStream;
 import org.opensearch.action.ProtobufActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.common.io.stream.StreamInput;

 import java.io.IOException;

diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java
index 36963fd5894f5..fb5359f370620 100644
--- a/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java
+++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java
@@ -13,10 +13,8 @@
 import org.opensearch.Build;
 import org.opensearch.Version;
 import org.opensearch.action.ProtobufActionResponse;
-import org.opensearch.cluster.ProtobufClusterName;
+import org.opensearch.cluster.ClusterName;
 import org.opensearch.core.ParseField;
-import org.opensearch.common.io.stream.StreamInput;
-import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.ObjectParser;
 import org.opensearch.core.xcontent.ToXContentObject;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -34,7 +32,7 @@ public class ProtobufMainResponse extends ProtobufActionResponse implements ToXC

     private String nodeName;
     private Version version;
-    private ProtobufClusterName clusterName;
+    private ClusterName clusterName;
     private String clusterUuid;
     private Build build;
     public static final String TAGLINE = "The OpenSearch Project: https://opensearch.org/";
@@ -45,12 +43,12 @@ public class ProtobufMainResponse extends ProtobufActionResponse implements ToXC
         super(in);
         nodeName = in.readString();
         version = Version.readVersionProtobuf(in);
-        clusterName = new ProtobufClusterName(in);
+        clusterName = new ClusterName(in);
         clusterUuid = in.readString();
         build = Build.readBuildProtobuf(in);
     }

-    public ProtobufMainResponse(String nodeName, Version version, ProtobufClusterName clusterName, String clusterUuid, Build build) {
+    public ProtobufMainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) {
         this.nodeName = nodeName;
         this.version = version;
         this.clusterName = clusterName;
@@ -66,7 +64,7 @@ public Version getVersion() {
         return version;
     }

-    public ProtobufClusterName getClusterName() {
+    public ClusterName getClusterName() {
         return clusterName;
     }

@@ -117,7 +115,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     static {
         PARSER.declareString((response, value) -> response.nodeName = value, new ParseField("name"));
-        PARSER.declareString((response, value) -> response.clusterName = new ProtobufClusterName(value), new ParseField("cluster_name"));
+        PARSER.declareString((response, value) -> response.clusterName = new ClusterName(value), new ParseField("cluster_name"));
         PARSER.declareString((response, value) -> response.clusterUuid = value, new ParseField("cluster_uuid"));
         PARSER.declareString((response, value) -> {}, new ParseField("tagline"));
         PARSER.declareObject((response, value) -> {
diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java b/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java
index d5f91651d2fb0..0385dedaaeab3 100644
--- a/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java
+++ b/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java
@@ -14,8 +14,7 @@
 import org.opensearch.action.support.ProtobufActionFilters;
 import org.opensearch.action.support.ProtobufHandledTransportAction;
 import org.opensearch.cluster.ClusterState;
-import org.opensearch.cluster.ProtobufClusterName;
-import org.opensearch.cluster.ProtobufClusterState;
+import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.common.settings.Settings;
@@ -48,7 +47,7 @@ public ProtobufTransportMainAction(
     @Override
     protected void doExecute(ProtobufTask task, ProtobufMainRequest request, ActionListener<ProtobufMainResponse> listener) {
         ClusterState clusterState = clusterService.state();
-        ProtobufClusterName clusterName = new ProtobufClusterName(clusterState.getClusterName().value());
+        ClusterName clusterName = new ClusterName(clusterState.getClusterName().value());
         listener.onResponse(
             new ProtobufMainResponse(nodeName, Version.CURRENT, clusterName, clusterState.metadata().clusterUUID(), Build.CURRENT)
         );
diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java b/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java
index 5d85e62ea9def..cf054e8e9ece9 100644
--- a/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java
+++ b/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java
@@ -22,9 +22,8 @@
  *
  * @opensearch.internal
  */
-public abstract class ProtobufHandledTransportAction<Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> extends ProtobufTransportAction<
-    Request,
-    Response> {
+public abstract class ProtobufHandledTransportAction<Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> extends
+    ProtobufTransportAction<Request, Response> {

     protected ProtobufHandledTransportAction(
         String actionName,
diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java
index 5b2e3b4d927d0..55c915522b404 100644
--- a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java
@@ -28,12 +28,12 @@
 import org.opensearch.cluster.coordination.FailedToCommitClusterStateException;
 import org.opensearch.cluster.metadata.ProtobufIndexNameExpressionResolver;
 import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
 import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
 import org.opensearch.cluster.service.ClusterManagerThrottlingException;
 import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.discovery.ClusterManagerNotDiscoveredException;
@@ -53,8 +53,9 @@
  *
  * @opensearch.internal
  */
-public abstract class ProtobufTransportClusterManagerNodeAction<Request extends ProtobufClusterManagerNodeRequest<Request>, Response extends ProtobufActionResponse>
-    extends ProtobufHandledTransportAction<Request, Response> {
+public abstract class ProtobufTransportClusterManagerNodeAction<
+    Request extends ProtobufClusterManagerNodeRequest<Request>,
+    Response extends ProtobufActionResponse> extends ProtobufHandledTransportAction<Request, Response> {

     private static final Logger logger = LogManager.getLogger(ProtobufTransportClusterManagerNodeAction.class);

@@ -108,7 +109,8 @@ protected void masterOperation(Request request, ProtobufClusterState state, Acti
     }

     // TODO: Add abstract keyword after removing the deprecated masterOperation()
-    protected void clusterManagerOperation(Request request, ProtobufClusterState state, ActionListener<Response> listener) throws Exception {
+    protected void clusterManagerOperation(Request request, ProtobufClusterState state, ActionListener<Response> listener)
+        throws Exception {
         masterOperation(request, state, listener);
     }

@@ -117,7 +119,8 @@ protected void clusterManagerOperation(Request request, ProtobufClusterState sta
      * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(ProtobufTask, ProtobufClusterManagerNodeRequest, ProtobufClusterState, ActionListener)}
      */
     @Deprecated
-    protected void masterOperation(ProtobufTask task, Request request, ProtobufClusterState state, ActionListener<Response> listener) throws Exception {
+    protected void masterOperation(ProtobufTask task, Request request, ProtobufClusterState state, ActionListener<Response> listener)
+        throws Exception {
         clusterManagerOperation(request, state, listener);
     }

@@ -125,8 +128,12 @@ protected void masterOperation(ProtobufTask task, Request request, ProtobufClust
      * Override this operation if access to the task parameter is needed
      */
     // TODO: Change the implementation to call 'clusterManagerOperation(request...)' after removing the deprecated masterOperation()
-    protected void clusterManagerOperation(ProtobufTask task, Request request, ProtobufClusterState state, ActionListener<Response> listener)
-        throws Exception {
+    protected void clusterManagerOperation(
+        ProtobufTask task,
+        Request request,
+        ProtobufClusterState state,
+        ActionListener<Response> listener
+    ) throws Exception {
         masterOperation(task, request, state, listener);
     }

@@ -209,7 +216,8 @@ public Exception getTimeoutException(Exception e) {
         protected void doStart(ProtobufClusterState clusterState) {
             try {
                 final ProtobufDiscoveryNodes nodes = clusterState.nodes();
-                if (nodes.isLocalNodeElectedClusterManager() || localExecute(request)) {
+                final DiscoveryNodes discoveryNodes = clusterService.state().nodes();
+                if (discoveryNodes.isLocalNodeElectedClusterManager() || localExecute(request)) {
                     // check for block, if blocked, retry, else, execute locally
                     final ClusterBlockException blockException = checkBlock(request, clusterState);
                     if (blockException != null) {
@@ -252,13 +260,16 @@ protected void doStart(ProtobufClusterState clusterState) {
                         logger.debug("no known cluster-manager node, scheduling a retry");
                         retryOnMasterChange(clusterState, null);
                     } else {
-                        ProtobufDiscoveryNode clusterManagerNode = nodes.getClusterManagerNode();
+                        DiscoveryNode clusterManagerNode = nodes.getClusterManagerNode();
                         final String actionName = getClusterManagerActionName(clusterManagerNode);
                         transportService.sendRequest(
                             clusterManagerNode,
                             actionName,
                             request,
-                            new ProtobufActionListenerResponseHandler<Response>(listener, ProtobufTransportClusterManagerNodeAction.this::read) {
+                            new ProtobufActionListenerResponseHandler<Response>(
+                                listener,
+                                ProtobufTransportClusterManagerNodeAction.this::read
+                            ) {
                                 @Override
                                 public void handleException(final ProtobufTransportException exp) {
                                     Throwable cause = exp.unwrapCause();
@@ -334,7 +345,7 @@ public void onTimeout(TimeValue timeout) {
      * Allows to conditionally return a different cluster-manager node action name in the case an action gets renamed.
      * This mainly for backwards compatibility should be used rarely
      */
-    protected String getClusterManagerActionName(ProtobufDiscoveryNode node) {
+    protected String getClusterManagerActionName(DiscoveryNode node) {
         return actionName;
     }

@@ -342,10 +353,10 @@ protected String getClusterManagerActionName(ProtobufDiscoveryNode node) {
      * Allows to conditionally return a different cluster-manager node action name in the case an action gets renamed.
      * This mainly for backwards compatibility should be used rarely
      *
-     * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #getClusterManagerActionName(ProtobufDiscoveryNode)}
+     * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #getClusterManagerActionName(DiscoveryNode)}
      */
    @Deprecated
-    protected String getMasterActionName(ProtobufDiscoveryNode node) {
+    protected String getMasterActionName(DiscoveryNode node) {
        return getClusterManagerActionName(node);
    }
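Note: with the signatures above, a concrete subclass only has to supply the response reader and the cluster-manager operation. A minimal illustrative shape (MyRequest/MyResponse are placeholders; the read() member is inferred from the ::read method reference above, and executor()/checkBlock() overrides are elided):

    // Illustrative subclass shape; not part of this patch.
    public class ProtobufTransportMyAction extends ProtobufTransportClusterManagerNodeAction<MyRequest, MyResponse> {
        @Override
        protected MyResponse read(CodedInputStream in) throws IOException {
            return new MyResponse(in);    // deserialize the response forwarded by the cluster-manager
        }

        @Override
        protected void clusterManagerOperation(MyRequest request, ProtobufClusterState state, ActionListener<MyResponse> listener)
            throws Exception {
            listener.onResponse(new MyResponse(state.getClusterName()));
        }
    }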
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java
index 5b8f96cbcd083..fb121e23a7953 100644
--- a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java
+++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java
@@ -15,7 +15,7 @@
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.transport.ProtobufTransportResponse;

 import java.io.IOException;
@@ -27,14 +27,14 @@
  */
 public abstract class ProtobufBaseNodeResponse extends ProtobufTransportResponse {

-    private ProtobufDiscoveryNode node;
+    private DiscoveryNode node;

     protected ProtobufBaseNodeResponse(CodedInputStream in) throws IOException {
         super(in);
-        node = new ProtobufDiscoveryNode(in);
+        node = new DiscoveryNode(in);
     }

-    protected ProtobufBaseNodeResponse(ProtobufDiscoveryNode node) {
+    protected ProtobufBaseNodeResponse(DiscoveryNode node) {
         assert node != null;
         this.node = node;
     }
@@ -42,7 +42,7 @@ protected ProtobufBaseNodeResponse(ProtobufDiscoveryNode node) {
     /**
      * The node this information relates to.
      */
-    public ProtobufDiscoveryNode getNode() {
+    public DiscoveryNode getNode() {
         return node;
     }
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java
index 3cdb26be01c1e..490c1a8ecf862 100644
--- a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java
+++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java
@@ -18,7 +18,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.ProtobufActionRequest;
 import org.opensearch.cluster.node.DiscoveryNodes;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.io.stream.ProtobufStreamInput;
 import org.opensearch.common.io.stream.ProtobufStreamOutput;
 import org.opensearch.common.unit.TimeValue;
@@ -47,7 +47,7 @@ public abstract class ProtobufBaseNodesRequest<Request extends ProtobufBaseNodesRequest<Request>>
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesResponse.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesResponse.java
 public abstract class ProtobufBaseNodesResponse<TNodeResponse extends ProtobufBaseNodeResponse> extends ProtobufActionResponse {

-    private ProtobufClusterName clusterName;
+    private ClusterName clusterName;
     private List<FailedNodeException> failures;
     private List<TNodeResponse> nodes;
     private Map<String, TNodeResponse> nodesMap;
@@ -42,27 +42,23 @@ public abstract class ProtobufBaseNodesResponse<TNodeResponse extends ProtobufBaseNodeResponse>
-    protected ProtobufBaseNodesResponse(
-        ProtobufClusterName clusterName,
-        List<TNodeResponse> nodes,
-        List<FailedNodeException> failures
-    ) {
+    protected ProtobufBaseNodesResponse(ClusterName clusterName, List<TNodeResponse> nodes, List<FailedNodeException> failures) {
         this.clusterName = Objects.requireNonNull(clusterName);
         this.failures = Objects.requireNonNull(failures);
         this.nodes = Objects.requireNonNull(nodes);
     }

     /**
-     * Get the {@link ProtobufClusterName} associated with all of the nodes.
+     * Get the {@link ClusterName} associated with all of the nodes.
      *
      * @return Never {@code null}.
      */
-    public ProtobufClusterName getClusterName() {
+    public ClusterName getClusterName() {
         return clusterName;
     }
diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java
index 4b805811f49c2..59e66c10473aa 100644
--- a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java
+++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java
@@ -16,9 +16,8 @@
 import org.opensearch.action.support.ProtobufActionFilters;
 import org.opensearch.action.support.ProtobufHandledTransportAction;
 import org.opensearch.cluster.ClusterState;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.tasks.ProtobufTask;
 import org.opensearch.threadpool.ThreadPool;
@@ -165,7 +164,11 @@ protected NodesResponse newResponse(NodesRequest request, AtomicReferenceArray
-    protected abstract NodesResponse newResponse(NodesRequest request, List<NodeResponse> responses, List<FailedNodeException> failures);
+    protected abstract NodesResponse newResponse(
+        NodesRequest request,
+        List<NodeResponse> responses,
+        List<FailedNodeException> failures
+    );

     protected abstract NodeRequest newNodeRequest(NodesRequest request);

@@ -183,13 +186,13 @@ protected NodeResponse nodeOperation(NodeRequest request, ProtobufTask task) {
     protected void resolveRequest(NodesRequest request, ClusterState clusterState) {
         assert request.concreteNodes() == null : "request concreteNodes shouldn't be set";
         String[] nodesIds = clusterState.nodes().resolveNodes(request.nodesIds());
-        request.setConcreteNodes(Arrays.stream(nodesIds).map(clusterState.nodes()::get).toArray(ProtobufDiscoveryNode[]::new));
+        request.setConcreteNodes(Arrays.stream(nodesIds).map(clusterState.nodes()::get).toArray(DiscoveryNode[]::new));
     }

     /**
      * Get a backwards compatible transport action name
      */
-    protected String getTransportNodeAction(ProtobufDiscoveryNode node) {
+    protected String getTransportNodeAction(DiscoveryNode node) {
         return transportNodeAction;
     }

@@ -218,7 +221,7 @@ class AsyncAction {
         }

         void start() {
-            final ProtobufDiscoveryNode[] nodes = request.concreteNodes();
+            final DiscoveryNode[] nodes = request.concreteNodes();
             if (nodes.length == 0) {
                 // nothing to notify
                 threadPool.generic().execute(() -> listener.onResponse(newResponse(request, responses)));
@@ -230,7 +233,7 @@ void start() {
             }
             for (int i = 0; i < nodes.length; i++) {
                 final int idx = i;
-                final ProtobufDiscoveryNode node = nodes[i];
+                final DiscoveryNode node = nodes[i];
                 final String nodeId = node.getId();
                 try {
                     ProtobufTransportRequest nodeRequest = newNodeRequest(request);
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java
index 13127640beb4f..36efc16f00930 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java
@@ -33,7 +33,7 @@
 package org.opensearch.cluster;

 import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;

 import java.util.function.Predicate;

@@ -76,10 +76,10 @@ public static Predicate<ClusterState> build(ClusterState currentState) {
      */
     public static Predicate<ProtobufClusterState> buildProtobuf(ProtobufClusterState currentState) {
         final long currentVersion = currentState.version();
-        final ProtobufDiscoveryNode clusterManagerNode = currentState.nodes().getClusterManagerNode();
+        final DiscoveryNode clusterManagerNode = currentState.nodes().getClusterManagerNode();
         final String currentMasterId = clusterManagerNode == null ? null : clusterManagerNode.getEphemeralId();
         return newState -> {
-            final ProtobufDiscoveryNode newClusterManager = newState.nodes().getClusterManagerNode();
+            final DiscoveryNode newClusterManager = newState.nodes().getClusterManagerNode();
             final boolean accept;
             if (newClusterManager == null) {
                 accept = false;
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterName.java b/server/src/main/java/org/opensearch/cluster/ClusterName.java
index 440fde284afb8..b74a393bdd658 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterName.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterName.java
@@ -32,12 +32,16 @@

 package org.opensearch.cluster;

+import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;

+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;
 import java.util.Objects;
 import java.util.function.Predicate;
@@ -47,7 +51,7 @@
  *
  * @opensearch.internal
  */
-public class ClusterName implements Writeable {
+public class ClusterName implements Writeable, ProtobufWriteable {

     public static final Setting<ClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> {
         if (s.isEmpty()) {
@@ -67,6 +71,10 @@ public ClusterName(StreamInput input) throws IOException {
         this(input.readString());
     }

+    public ClusterName(CodedInputStream input) throws IOException {
+        this(input.readString());
+    }
+
     public ClusterName(String value) {
         this.value = value.intern();
     }
@@ -80,6 +88,11 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(value);
     }

+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        out.writeStringNoTag(value);
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
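Note: ClusterName now implements both Writeable and ProtobufWriteable, so one instance serves both transports. Protobuf round trip, as a sketch (the harness is assumed, not part of this patch):

    ClusterName name = new ClusterName("my-cluster");
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    CodedOutputStream out = CodedOutputStream.newInstance(bytes);
    name.writeTo(out);                    // protobuf overload: one length-prefixed string
    out.flush();
    ClusterName back = new ClusterName(CodedInputStream.newInstance(bytes.toByteArray()));
    assert name.equals(back);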
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java
index bf4d7977222ab..e7c552dfdee60 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterState.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java
@@ -32,6 +32,7 @@

 package org.opensearch.cluster;

+
 import org.opensearch.cluster.block.ClusterBlock;
 import org.opensearch.cluster.block.ClusterBlocks;
 import org.opensearch.cluster.coordination.CoordinationMetadata;
@@ -770,6 +771,25 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr
         return builder.build();
     }

+    // public static ClusterState readFrom(CodedInputStream in, DiscoveryNode localNode) throws IOException {
+    //     ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
+    //     ClusterName clusterName = new ClusterName(in);
+    //     Builder builder = new Builder(clusterName);
+    //     builder.version = in.readInt64();
+    //     builder.uuid = in.readString();
+    //     builder.metadata = Metadata.readFrom(in);
+    //     builder.routingTable = RoutingTable.readFrom(in);
+    //     builder.nodes = ProtobufDiscoveryNodes.readFrom(in, localNode);
+    //     builder.blocks = ClusterBlocks.readFrom(in);
+    //     int customSize = in.readInt32();
+    //     for (int i = 0; i < customSize; i++) {
+    //         Custom customIndexMetadata = protobufStreamInput.readNamedWriteable(Custom.class);
+    //         builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata);
+    //     }
+    //     builder.minimumClusterManagerNodesOnPublishingClusterManager = in.readInt32();
+    //     return builder.build();
+    // }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         clusterName.writeTo(out);
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java
deleted file mode 100644
index a7980d7cbd3ed..0000000000000
--- a/server/src/main/java/org/opensearch/cluster/ProtobufClusterName.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-* SPDX-License-Identifier: Apache-2.0
-*
-* The OpenSearch Contributors require contributions made to
-* this file be licensed under the Apache-2.0 license or a
-* compatible open source license.
-*/
-
-/*
-* Modifications Copyright OpenSearch Contributors. See
-* GitHub history for details.
-*/
-
-package org.opensearch.cluster;
-
-import com.google.protobuf.CodedInputStream;
-import com.google.protobuf.CodedOutputStream;
-import org.opensearch.common.io.stream.ProtobufWriteable;
-import org.opensearch.common.settings.Setting;
-import org.opensearch.common.settings.Settings;
-
-import java.io.IOException;
-import java.util.function.Predicate;
-
-/**
- * Cluster Name
-*
-* @opensearch.internal
-*/
-public class ProtobufClusterName implements ProtobufWriteable {
-
-    public static final Setting<ProtobufClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> {
-        if (s.isEmpty()) {
-            throw new IllegalArgumentException("[cluster.name] must not be empty");
-        }
-        if (s.contains(":")) {
-            throw new IllegalArgumentException("[cluster.name] must not contain ':'");
-        }
-        return new ProtobufClusterName(s);
-    }, Setting.Property.NodeScope);
-
-    public static final ProtobufClusterName DEFAULT = CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
-
-    private final String value;
-
-    public ProtobufClusterName(CodedInputStream input) throws IOException {
-        this(input.readString());
-    }
-
-    public ProtobufClusterName(String value) {
-        this.value = value.intern();
-    }
-
-    public String value() {
-        return this.value;
-    }
-
-    @Override
-    public void writeTo(CodedOutputStream out) throws IOException {
-        out.writeStringNoTag(value);
-    }
-
-    public Predicate<ProtobufClusterName> getEqualityPredicate() {
-        return new Predicate<ProtobufClusterName>() {
-            @Override
-            public boolean test(ProtobufClusterName o) {
-                return ProtobufClusterName.this.equals(o);
-            }
-
-            @Override
-            public String toString() {
-                return "local cluster name [" + ProtobufClusterName.this.value() + "]";
-            }
-        };
-    }
-}
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
index 5973eab393121..d1faa196a8dd3 100644
--- a/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
@@ -22,9 +22,10 @@
 import org.opensearch.cluster.coordination.CoordinationMetadata;
 import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion;
 import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
 import org.opensearch.cluster.routing.IndexRoutingTable;
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
@@ -80,8 +81,7 @@
  */
 public class ProtobufClusterState implements ToXContentFragment, ProtobufDiffable<ProtobufClusterState> {

-    public static final ProtobufClusterState EMPTY_STATE = builder(ProtobufClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
-        .build();
+    public static final ProtobufClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();

     /**
      * An interface that implementors use when a class requires a client to maybe have a feature.
@@ -159,7 +159,7 @@ default boolean isPrivate() {

     private final ImmutableOpenMap<String, Custom> customs;

-    private final ProtobufClusterName clusterName;
+    private final ClusterName clusterName;

     private final boolean wasReadFromDiff;

@@ -184,7 +184,7 @@ public ProtobufClusterState(long version, String stateUUID, ProtobufClusterState
     }

     public ProtobufClusterState(
-        ProtobufClusterName clusterName,
+        ClusterName clusterName,
         long version,
         String stateUUID,
         Metadata metadata,
@@ -287,7 +287,7 @@ public T custom(String type, T defaultValue) {
         return (T) customs.getOrDefault(type, defaultValue);
     }

-    public ProtobufClusterName getClusterName() {
+    public ClusterName getClusterName() {
         return this.clusterName;
     }

@@ -426,7 +426,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         // nodes
         if (metrics.contains(Metric.NODES)) {
             builder.startObject("nodes");
-            for (ProtobufDiscoveryNode node : nodes) {
+            for (DiscoveryNode node : nodes) {
                 node.toXContent(builder, params);
             }
             builder.endObject();
@@ -490,7 +490,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }

-    public static Builder builder(ProtobufClusterName clusterName) {
+    public static Builder builder(ClusterName clusterName) {
         return new Builder(clusterName);
     }

@@ -498,6 +498,74 @@ public static Builder builder(ProtobufClusterState state) {
         return new Builder(state);
     }

+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        final String TAB = "   ";
+        sb.append("cluster uuid: ")
+            .append(metadata.clusterUUID())
+            .append(" [committed: ")
+            .append(metadata.clusterUUIDCommitted())
+            .append("]")
+            .append("\n");
+        sb.append("version: ").append(version).append("\n");
+        sb.append("state uuid: ").append(stateUUID).append("\n");
+        sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
+        sb.append("meta data version: ").append(metadata.version()).append("\n");
+        sb.append(TAB).append("coordination_metadata:\n");
+        sb.append(TAB).append(TAB).append("term: ").append(coordinationMetadata().term()).append("\n");
+        sb.append(TAB)
+            .append(TAB)
+            .append("last_committed_config: ")
+            .append(coordinationMetadata().getLastCommittedConfiguration())
+            .append("\n");
+        sb.append(TAB)
+            .append(TAB)
+            .append("last_accepted_config: ")
+            .append(coordinationMetadata().getLastAcceptedConfiguration())
+            .append("\n");
+        sb.append(TAB).append(TAB).append("voting tombstones: ").append(coordinationMetadata().getVotingConfigExclusions()).append("\n");
+        for (IndexMetadata indexMetadata : metadata) {
+            sb.append(TAB).append(indexMetadata.getIndex());
+            sb.append(": v[")
+                .append(indexMetadata.getVersion())
+                .append("], mv[")
+                .append(indexMetadata.getMappingVersion())
+                .append("], sv[")
+                .append(indexMetadata.getSettingsVersion())
+                .append("], av[")
+                .append(indexMetadata.getAliasesVersion())
+                .append("]\n");
+            for (int shard = 0; shard < indexMetadata.getNumberOfShards(); shard++) {
+                sb.append(TAB).append(TAB).append(shard).append(": ");
+                sb.append("p_term [").append(indexMetadata.primaryTerm(shard)).append("], ");
+                sb.append("isa_ids ").append(indexMetadata.inSyncAllocationIds(shard)).append("\n");
+            }
+        }
+        if (metadata.customs().isEmpty() == false) {
+            sb.append("metadata customs:\n");
+            for (final Map.Entry<String, Metadata.Custom> cursor : metadata.customs().entrySet()) {
+                final String type = cursor.getKey();
+                final Metadata.Custom custom = cursor.getValue();
+                sb.append(TAB).append(type).append(": ").append(custom);
+            }
+            sb.append("\n");
+        }
+        sb.append(blocks());
+        sb.append(nodes());
+        sb.append(routingTable());
+        // sb.append(getRoutingNodes());
+        if (customs.isEmpty() == false) {
+            sb.append("customs:\n");
+            for (ObjectObjectCursor<String, Custom> cursor : customs) {
+                final String type = cursor.key;
+                final Custom custom = cursor.value;
+                sb.append(TAB).append(type).append(": ").append(custom);
+            }
+        }
+        return sb.toString();
+    }
+
     /**
      * Builder for cluster state.
      *
@@ -505,7 +573,7 @@ public static Builder builder(ProtobufClusterState state) {
     public static class Builder {

-        private final ProtobufClusterName clusterName;
+        private final ClusterName clusterName;
         private long version = 0;
         private String uuid = UNKNOWN_UUID;
         private Metadata metadata = Metadata.EMPTY_METADATA;
@@ -529,7 +597,7 @@ public Builder(ProtobufClusterState state) {
             this.fromDiff = false;
         }

-        public Builder(ProtobufClusterName clusterName) {
+        public Builder(ClusterName clusterName) {
             customs = ImmutableOpenMap.builder();
             this.clusterName = clusterName;
         }
@@ -659,7 +727,7 @@ public static ProtobufDiff readDiffFrom(CodedInputStream i
     public static ProtobufClusterState readFrom(CodedInputStream in, DiscoveryNode localNode) throws IOException {
         ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
-        ProtobufClusterName clusterName = new ProtobufClusterName(in);
+        ClusterName clusterName = new ClusterName(in);
         Builder builder = new Builder(clusterName);
         builder.version = in.readInt64();
         builder.uuid = in.readString();
@@ -715,7 +783,7 @@ private static class ClusterStateDiff implements ProtobufDiff<ProtobufClusterState> {
-        private final ProtobufClusterName clusterName;
+        private final ClusterName clusterName;
         private final ProtobufDiff<RoutingTable> routingTable;
@@ -748,7 +816,7 @@ private static class ClusterStateDiff implements ProtobufDiff<ProtobufClusterState>
             throw new OpenSearchException("already waiting for a cluster state change");
         }
         // clusterApplierService.addTimeoutListener(
-        //     timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS),
-        //     clusterStateListener
+        //     timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS),
+        //     clusterStateListener
         // );
     }
 }
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
index 1c38e68c43466..ba2e4560beec1 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
@@ -42,6 +42,8 @@
 import org.opensearch.cluster.ClusterStateTaskConfig;
 import org.opensearch.cluster.ClusterStateUpdateTask;
 import org.opensearch.cluster.LocalClusterUpdateTask;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.ProtobufClusterState;
 import org.opensearch.cluster.block.ClusterBlocks;
 import org.opensearch.cluster.coordination.ClusterFormationFailureHelper.ClusterFormationState;
 import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion;
@@ -52,6 +54,7 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
 import org.opensearch.cluster.routing.RerouteService;
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.service.ClusterApplier;
@@ -80,6 +83,7 @@
 import org.opensearch.discovery.DiscoveryStats;
 import org.opensearch.discovery.HandshakingTransportAddressConnector;
 import org.opensearch.discovery.PeerFinder;
+import org.opensearch.discovery.ProtobufDiscoveryStats;
 import org.opensearch.discovery.SeedHostsProvider;
 import org.opensearch.discovery.SeedHostsResolver;
 import org.opensearch.monitor.NodeHealthService;
@@ -851,8 +855,17 @@ protected void doStart() {
                 )
                 .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId()))
                 .build();
+            ProtobufClusterState protobufInitialState = ProtobufClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings))
+                .blocks(
+                    ClusterBlocks.builder()
+                        .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)
+                        .addGlobalBlock(noClusterManagerBlockService.getNoClusterManagerBlock())
+                )
+                .nodes(ProtobufDiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId()))
+                .build();
             applierState = initialState;
             clusterApplier.setInitialState(initialState);
+            clusterApplier.setInitialProtobufState(protobufInitialState);
         }
     }

@@ -861,6 +874,11 @@ public DiscoveryStats stats() {
         return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats());
     }

+    @Override
+    public ProtobufDiscoveryStats protobufStats() {
+        return new ProtobufDiscoveryStats(new ProtobufPendingClusterStateStats(0, 0, 0), publicationHandler.protobufStats());
+    }
+
     @Override
     public void startInitialJoin() {
         synchronized (mutex) {
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
index 60c931a601561..3e426ce557ba9 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
@@ -159,6 +159,14 @@ public PublishClusterStateStats stats() {
         );
     }

+    public ProtobufPublishClusterStateStats protobufStats() {
+        return new ProtobufPublishClusterStateStats(
+            fullClusterStateReceivedCount.get(),
+            incompatibleClusterStateDiffReceivedCount.get(),
+            compatibleClusterStateDiffReceivedCount.get()
+        );
+    }
+
     private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportRequest request) throws IOException {
         try (StreamInput in = CompressedStreamUtils.decompressBytes(request, namedWriteableRegistry)) {
             ClusterState incomingState;
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
index 3e36a5e65bf07..7e77821521faf 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
@@ -32,6 +32,8 @@

 package org.opensearch.cluster.metadata;

+import com.google.protobuf.CodedInputStream;
+
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
 import org.apache.lucene.util.CollectionUtil;
@@ -92,6 +94,7 @@
 import java.util.stream.StreamSupport;

 import static org.opensearch.common.settings.Settings.readSettingsFromStream;
+import static org.opensearch.common.settings.Settings.readSettingsFromStreamProtobuf;
 import static org.opensearch.common.settings.Settings.writeSettingsToStream;

 /**
@@ -1062,6 +1065,31 @@ public static Metadata readFrom(StreamInput in) throws IOException {
         return builder.build();
     }

+    public static Metadata readFrom(CodedInputStream in) throws IOException {
+        Builder builder = new Builder();
+        builder.version = in.readInt64();
+        builder.clusterUUID = in.readString();
+        builder.clusterUUIDCommitted = in.readBool();
+        // builder.coordinationMetadata(new CoordinationMetadata(in));
+        builder.transientSettings(readSettingsFromStreamProtobuf(in));
+        builder.persistentSettings(readSettingsFromStreamProtobuf(in));
+        // builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in));
+        int size = in.readInt32();
+        // for (int i = 0; i < size; i++) {
+        //     builder.put(IndexMetadata.readFrom(in), false);
+        // }
+        size = in.readInt32();
+        // for (int i = 0; i < size; i++) {
+        //     builder.put(IndexTemplateMetadata.readFrom(in));
+        // }
+        int customSize = in.readInt32();
+        // for (int i = 0; i < customSize; i++) {
+        //     Custom customIndexMetadata = in.readNamedWriteable(Custom.class);
+        //     builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata);
+        // }
+        return builder.build();
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(version);
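Note: everything here uses the *NoTag write variants, so no protobuf field numbers go on the wire; reader and writer must agree on the exact order and count of values, which is why readFrom above has to mirror its writeTo line for line (including the still-commented sections). A tiny illustration of that contract (illustrative only):

    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    CodedOutputStream out = CodedOutputStream.newInstance(buffer);
    out.writeInt64NoTag(7L);              // value 1
    out.writeStringNoTag("cluster-uuid"); // value 2
    out.writeBoolNoTag(true);             // value 3
    out.flush();

    CodedInputStream in = CodedInputStream.newInstance(buffer.toByteArray());
    long version = in.readInt64();        // must be read in the same order...
    String uuid = in.readString();
    boolean committed = in.readBool();    // ...or the stream is misinterpreted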
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java
index dbb1bc319575d..8a8e2b2545150 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/ProtobufIndexNameExpressionResolver.java
@@ -6,7 +6,6 @@
  * compatible open source license.
  */

-
 package org.opensearch.cluster.metadata;

 import org.opensearch.OpenSearchParseException;
@@ -130,7 +129,12 @@ public String[] concreteIndexNames(ProtobufClusterState state, IndicesOptions op
         return concreteIndexNames(context, indexExpressions);
     }

-    public String[] concreteIndexNames(ProtobufClusterState state, IndicesOptions options, boolean includeDataStreams, String... indexExpressions) {
+    public String[] concreteIndexNames(
+        ProtobufClusterState state,
+        IndicesOptions options,
+        boolean includeDataStreams,
+        String... indexExpressions
+    ) {
         Context context = new Context(state, options, false, false, includeDataStreams, isSystemIndexAccessAllowed());
         return concreteIndexNames(context, indexExpressions);
     }
@@ -173,7 +177,12 @@ public Index[] concreteIndices(ProtobufClusterState state, IndicesOptions option
         return concreteIndices(state, options, false, indexExpressions);
     }

-    public Index[] concreteIndices(ProtobufClusterState state, IndicesOptions options, boolean includeDataStreams, String... indexExpressions) {
+    public Index[] concreteIndices(
+        ProtobufClusterState state,
+        IndicesOptions options,
+        boolean includeDataStreams,
+        String... indexExpressions
+    ) {
         Context context = new Context(state, options, false, false, includeDataStreams, isSystemIndexAccessAllowed());
         return concreteIndices(context, indexExpressions);
     }
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
index d6ba0199d193c..56cd468d3d3a4 100644
--- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
+++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
@@ -34,16 +34,23 @@

 import org.opensearch.Version;
 import org.opensearch.common.UUIDs;
+import org.opensearch.common.io.stream.ProtobufStreamInput;
+import org.opensearch.common.io.stream.ProtobufStreamOutput;
+import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.transport.ProtobufTransportAddress;
 import org.opensearch.common.transport.TransportAddress;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.node.Node;

+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
@@ -66,7 +73,7 @@
  *
  * @opensearch.internal
  */
-public class DiscoveryNode implements Writeable, ToXContentFragment {
+public class DiscoveryNode implements Writeable, ProtobufWriteable, ToXContentFragment {

     static final String COORDINATING_ONLY = "coordinating_only";

@@ -347,6 +354,53 @@ public DiscoveryNode(StreamInput in) throws IOException {
         this.version = in.readVersion();
     }

+    /**
+     * Creates a new {@link DiscoveryNode} by reading from the stream provided as argument
+     * @param in the stream
+     * @throws IOException if there is an error while reading from the stream
+     */
+    public DiscoveryNode(CodedInputStream in) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
+        this.nodeName = in.readString();
+        this.nodeId = in.readString();
+        this.ephemeralId = in.readString();
+        this.hostName = in.readString();
+        this.hostAddress = in.readString();
+        ProtobufTransportAddress protobufTransportAddress = new ProtobufTransportAddress(in);
+        this.address = new TransportAddress(protobufTransportAddress.address());
+        // this.address = new TransportAddress(in);
+        int size = in.readInt32();
+        this.attributes = new HashMap<>(size);
+        for (int i = 0; i < size; i++) {
+            this.attributes.put(in.readString(), in.readString());
+        }
+        int rolesSize = in.readInt32();
+        final Set<DiscoveryNodeRole> roles = new HashSet<>(rolesSize);
+        for (int i = 0; i < rolesSize; i++) {
+            final String roleName = in.readString();
+            final String roleNameAbbreviation = in.readString();
+            final boolean canContainData = in.readBool();
+            final DiscoveryNodeRole role = roleMap.get(roleName);
+            if (role == null) {
+                if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_1_0)) {
+                    roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData));
+                } else {
+                    roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData));
+                }
+            } else {
+                assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]";
+                assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation ["
+                    + roleName
+                    + "] does not match role ["
+                    + role.roleNameAbbreviation()
+                    + "]";
+                roles.add(role);
+            }
+        }
+        this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles));
+        this.version = Version.readVersionProtobuf(in);
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(nodeName);
@@ -370,6 +424,32 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVersion(version);
     }

+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out);
+        out.writeStringNoTag(nodeName);
+        out.writeStringNoTag(nodeId);
+        out.writeStringNoTag(ephemeralId);
+        out.writeStringNoTag(hostName);
+        out.writeStringNoTag(hostAddress);
+        ProtobufTransportAddress protobufTransportAddress = new ProtobufTransportAddress(address.address());
+        protobufTransportAddress.writeTo(out);
+        // address.writeTo(out);
+        out.writeInt32NoTag(attributes.size());
+        for (Map.Entry<String, String> entry : attributes.entrySet()) {
+            out.writeStringNoTag(entry.getKey());
+            out.writeStringNoTag(entry.getValue());
+        }
+        out.writeInt32NoTag(roles.size());
+        for (final DiscoveryNodeRole role : roles) {
+            final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(protobufStreamOutput.getVersion());
+            out.writeStringNoTag(compatibleRole.roleName());
+            out.writeStringNoTag(compatibleRole.roleNameAbbreviation());
+            out.writeBoolNoTag(compatibleRole.canContainData());
+        }
+        out.writeInt32NoTag(version.id);
+    }
+
     /**
      * The address that the node can be communicated with.
      */
@@ -377,6 +457,13 @@ public TransportAddress getAddress() {
         return address;
     }

+    /**
+     * The address that the node can be communicated with.
+     */
+    public ProtobufTransportAddress getProtobufAddress() {
+        return new ProtobufTransportAddress(address.address());
+    }
+
     /**
      * The unique id of the node.
      */
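Note: rather than porting TransportAddress itself, the constructor and writeTo above bridge through the underlying java.net InetSocketAddress exposed by both address types' address() accessors. Usage sketch:

    TransportAddress original = node.getAddress();
    ProtobufTransportAddress wire = new ProtobufTransportAddress(original.address()); // to protobuf form
    TransportAddress roundTripped = new TransportAddress(wire.address());             // and back
    assert original.equals(roundTripped);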
-*/ - -package org.opensearch.cluster.node; - -import com.google.protobuf.CodedInputStream; -import com.google.protobuf.CodedOutputStream; -import org.opensearch.Version; -import org.opensearch.common.UUIDs; -import org.opensearch.common.io.stream.ProtobufStreamInput; -import org.opensearch.common.io.stream.ProtobufStreamOutput; -import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.ProtobufTransportAddress; -import org.opensearch.core.xcontent.ToXContentFragment; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.Node; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING; - -/** - * A discovery node represents a node that is part of the cluster. -* -* @opensearch.internal -*/ -public class ProtobufDiscoveryNode implements ProtobufWriteable, ToXContentFragment { - - static final String COORDINATING_ONLY = "coordinating_only"; - - public static boolean nodeRequiresLocalStorage(Settings settings) { - boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings); - if (localStorageEnable == false && (isDataNode(settings) || isClusterManagerNode(settings))) { - // TODO: make this a proper setting validation logic, requiring multi-settings validation - throw new IllegalArgumentException("storage can not be disabled for cluster-manager and data nodes"); - } - return localStorageEnable; - } - - public static boolean hasRole(final Settings settings, final DiscoveryNodeRole role) { - /* - * This method can be called before the o.e.n.NodeRoleSettings.NODE_ROLES_SETTING is initialized. We do not want to trigger - * initialization prematurely because that will bake the default roles before plugins have had a chance to register them. Therefore, - * to avoid initializing this setting prematurely, we avoid using the actual node roles setting instance here. - */ - if (settings.hasValue("node.roles")) { - return settings.getAsList("node.roles").contains(role.roleName()); - } else if (role.legacySetting() != null && settings.hasValue(role.legacySetting().getKey())) { - return role.legacySetting().get(settings); - } else { - return role.isEnabledByDefault(settings); - } - } - - public static boolean isClusterManagerNode(Settings settings) { - return hasRole(settings, DiscoveryNodeRole.MASTER_ROLE) || hasRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); - } - - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerNode(Settings)} */ - @Deprecated - public static boolean isMasterNode(Settings settings) { - return isClusterManagerNode(settings); - } - - /** - * Due to the way that plugins may not be available when settings are being initialized, - * not all roles may be available from a static/initializing context such as a {@link Setting} - * default value function. In that case, be warned that this may not include all plugin roles. 
- */ - public static boolean isDataNode(final Settings settings) { - return getRolesFromSettings(settings).stream().anyMatch(DiscoveryNodeRole::canContainData); - } - - public static boolean isIngestNode(Settings settings) { - return hasRole(settings, DiscoveryNodeRole.INGEST_ROLE); - } - - public static boolean isRemoteClusterClient(final Settings settings) { - return hasRole(settings, DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); - } - - public static boolean isSearchNode(Settings settings) { - return hasRole(settings, DiscoveryNodeRole.SEARCH_ROLE); - } - - private final String nodeName; - private final String nodeId; - private final String ephemeralId; - private final String hostName; - private final String hostAddress; - private final ProtobufTransportAddress address; - private final Map attributes; - private final Version version; - private final SortedSet roles; - - /** - * Creates a new {@link ProtobufDiscoveryNode} - *
<p>
- * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current - * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used - * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered - * and updated. - *
<p>
- * - * @param id the nodes unique (persistent) node id. This constructor will auto generate a random ephemeral id. - * @param address the nodes transport address - * @param version the version of the node - */ - public ProtobufDiscoveryNode(final String id, ProtobufTransportAddress address, Version version) { - this(id, address, Collections.emptyMap(), DiscoveryNodeRole.BUILT_IN_ROLES, version); - } - - /** - * Creates a new {@link ProtobufDiscoveryNode} - *
<p>
- * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current - * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used - * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered - * and updated. - *
<p>
- * - * @param id the nodes unique (persistent) node id. This constructor will auto generate a random ephemeral id. - * @param address the nodes transport address - * @param attributes node attributes - * @param roles node roles - * @param version the version of the node - */ - public ProtobufDiscoveryNode( - String id, - ProtobufTransportAddress address, - Map attributes, - Set roles, - Version version - ) { - this("", id, address, attributes, roles, version); - } - - /** - * Creates a new {@link ProtobufDiscoveryNode} - *
<p>
- * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current - * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used - * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered - * and updated. - *
<p>
- * - * @param nodeName the nodes name - * @param nodeId the nodes unique persistent id. An ephemeral id will be auto generated. - * @param address the nodes transport address - * @param attributes node attributes - * @param roles node roles - * @param version the version of the node - */ - public ProtobufDiscoveryNode( - String nodeName, - String nodeId, - ProtobufTransportAddress address, - Map attributes, - Set roles, - Version version - ) { - this( - nodeName, - nodeId, - UUIDs.randomBase64UUID(), - address.address().getHostString(), - address.getAddress(), - address, - attributes, - roles, - version - ); - } - - /** - * Creates a new {@link ProtobufDiscoveryNode}. - *
<p>
- * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current - * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used - * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered - * and updated. - *
<p>
- * - * @param nodeName the nodes name - * @param nodeId the nodes unique persistent id - * @param ephemeralId the nodes unique ephemeral id - * @param hostAddress the nodes host address - * @param address the nodes transport address - * @param attributes node attributes - * @param roles node roles - * @param version the version of the node - */ - public ProtobufDiscoveryNode( - String nodeName, - String nodeId, - String ephemeralId, - String hostName, - String hostAddress, - ProtobufTransportAddress address, - Map attributes, - Set roles, - Version version - ) { - if (nodeName != null) { - this.nodeName = nodeName.intern(); - } else { - this.nodeName = ""; - } - this.nodeId = nodeId.intern(); - this.ephemeralId = ephemeralId.intern(); - this.hostName = hostName.intern(); - this.hostAddress = hostAddress.intern(); - this.address = address; - if (version == null) { - this.version = Version.CURRENT; - } else { - this.version = version; - } - this.attributes = Collections.unmodifiableMap(attributes); - // verify that no node roles are being provided as attributes - Predicate> predicate = (attrs) -> { - boolean success = true; - for (final DiscoveryNodeRole role : ProtobufDiscoveryNode.roleMap.values()) { - success &= attrs.containsKey(role.roleName()) == false; - assert success : role.roleName(); - } - return success; - }; - assert predicate.test(attributes) : attributes; - this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles)); - } - - /** Creates a ProtobufDiscoveryNode representing the local node. */ - public static ProtobufDiscoveryNode createLocal(Settings settings, ProtobufTransportAddress publishAddress, String nodeId) { - Map attributes = Node.NODE_ATTRIBUTES.getAsMap(settings); - Set roles = getRolesFromSettings(settings); - return new ProtobufDiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, roles, Version.CURRENT); - } - - /** extract node roles from the given settings */ - public static Set getRolesFromSettings(final Settings settings) { - if (NODE_ROLES_SETTING.exists(settings)) { - validateLegacySettings(settings, roleMap); - return Collections.unmodifiableSet(new HashSet<>(NODE_ROLES_SETTING.get(settings))); - } else { - return roleMap.values().stream().filter(s -> s.isEnabledByDefault(settings)).collect(Collectors.toSet()); - } - } - - private static void validateLegacySettings(final Settings settings, final Map roleMap) { - for (final DiscoveryNodeRole role : roleMap.values()) { - if (role.legacySetting() != null && role.legacySetting().exists(settings)) { - final String message = String.format( - Locale.ROOT, - "can not explicitly configure node roles and use legacy role setting [%s]=[%s]", - role.legacySetting().getKey(), - role.legacySetting().get(settings) - ); - throw new IllegalArgumentException(message); - } - } - } - - /** - * Creates a new {@link ProtobufDiscoveryNode} by reading from the stream provided as argument - * @param in the stream - * @throws IOException if there is an error while reading from the stream - */ - public ProtobufDiscoveryNode(CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); - this.nodeName = in.readString(); - this.nodeId = in.readString(); - this.ephemeralId = in.readString(); - this.hostName = in.readString(); - this.hostAddress = in.readString(); - this.address = new ProtobufTransportAddress(in); - int size = in.readInt32(); - this.attributes = new HashMap<>(size); - for (int i = 0; i < size; i++) { - 
this.attributes.put(in.readString(), in.readString()); - } - int rolesSize = in.readInt32(); - final Set roles = new HashSet<>(rolesSize); - for (int i = 0; i < rolesSize; i++) { - final String roleName = in.readString(); - final String roleNameAbbreviation = in.readString(); - final boolean canContainData = in.readBool(); - final DiscoveryNodeRole role = roleMap.get(roleName); - if (role == null) { - if (protobufStreamInput.getVersion().onOrAfter(Version.V_2_1_0)) { - roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); - } else { - roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); - } - } else { - assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; - assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" - + roleName - + "] does not match role [" - + role.roleNameAbbreviation() - + "]"; - roles.add(role); - } - } - this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles)); - this.version = Version.readVersionProtobuf(in); - } - - @Override - public void writeTo(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); - out.writeStringNoTag(nodeName); - out.writeStringNoTag(nodeId); - out.writeStringNoTag(ephemeralId); - out.writeStringNoTag(hostName); - out.writeStringNoTag(hostAddress); - address.writeTo(out); - out.writeInt32NoTag(attributes.size()); - for (Map.Entry entry : attributes.entrySet()) { - out.writeStringNoTag(entry.getKey()); - out.writeStringNoTag(entry.getValue()); - } - out.writeInt32NoTag(roles.size()); - for (final DiscoveryNodeRole role : roles) { - final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(protobufStreamOutput.getVersion()); - out.writeStringNoTag(compatibleRole.roleName()); - out.writeStringNoTag(compatibleRole.roleNameAbbreviation()); - out.writeBoolNoTag(compatibleRole.canContainData()); - } - out.writeInt32NoTag(version.id); - } - - /** - * The address that the node can be communicated with. - */ - public ProtobufTransportAddress getAddress() { - return address; - } - - /** - * The unique id of the node. - */ - public String getId() { - return nodeId; - } - - /** - * The unique ephemeral id of the node. Ephemeral ids are meant to be attached the life span - * of a node process. When ever a node is restarted, it's ephemeral id is required to change (while it's {@link #getId()} - * will be read from the data folder and will remain the same across restarts). - */ - public String getEphemeralId() { - return ephemeralId; - } - - /** - * The name of the node. - */ - public String getName() { - return this.nodeName; - } - - /** - * The node attributes. - */ - public Map getAttributes() { - return this.attributes; - } - - /** - * Should this node hold data (shards) or not. - */ - public boolean isDataNode() { - return roles.stream().anyMatch(DiscoveryNodeRole::canContainData); - } - - /** - * Can this node become cluster-manager or not. - */ - public boolean isClusterManagerNode() { - return roles.contains(DiscoveryNodeRole.MASTER_ROLE) || roles.contains(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); - } - - /** - * Can this node become cluster-manager or not. 
- * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerNode()} - */ - @Deprecated - public boolean isMasterNode() { - return isClusterManagerNode(); - } - - /** - * Returns a boolean that tells whether this an ingest node or not - */ - public boolean isIngestNode() { - return roles.contains(DiscoveryNodeRole.INGEST_ROLE); - } - - /** - * Returns whether or not the node can be a remote cluster client. - * - * @return true if the node can be a remote cluster client, false otherwise - */ - public boolean isRemoteClusterClient() { - return roles.contains(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); - } - - /** - * Returns whether the node is dedicated to provide search capability. - * - * @return true if the node contains search role, false otherwise - */ - public boolean isSearchNode() { - return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); - } - - /** - * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. - *
<p>
- * If a node does not have any specific role, the returned set is empty, which means that the node is a coordinating-only node. - * - * @return the sorted set of roles - */ - public Set getRoles() { - return roles; - } - - public Version getVersion() { - return this.version; - } - - public String getHostName() { - return this.hostName; - } - - public String getHostAddress() { - return this.hostAddress; - } - - private static Map rolesToMap(final Stream roles) { - return Collections.unmodifiableMap(roles.collect(Collectors.toMap(DiscoveryNodeRole::roleName, Function.identity()))); - } - - private static Map roleMap = rolesToMap(DiscoveryNodeRole.BUILT_IN_ROLES.stream()); - - public static DiscoveryNodeRole getRoleFromRoleName(final String roleName) { - // As we are supporting dynamic role, should make role name case-insensitive to avoid confusion of role name like "Data"/"DATA" - String lowerCasedRoleName = Objects.requireNonNull(roleName).toLowerCase(Locale.ROOT); - if (roleMap.containsKey(lowerCasedRoleName)) { - return roleMap.get(lowerCasedRoleName); - } - return new DiscoveryNodeRole.DynamicRole(lowerCasedRoleName, lowerCasedRoleName, false); - } - - public static Set getPossibleRoles() { - return Collections.unmodifiableSet(new HashSet<>(roleMap.values())); - } - - public static void setAdditionalRoles(final Set additionalRoles) { - assert additionalRoles.stream().allMatch(r -> r.legacySetting() == null || r.legacySetting().isDeprecated()) : additionalRoles; - final Map roleNameToPossibleRoles = rolesToMap( - Stream.concat(DiscoveryNodeRole.BUILT_IN_ROLES.stream(), additionalRoles.stream()) - ); - // collect the abbreviation names into a map to ensure that there are not any duplicate abbreviations - final Map roleNameAbbreviationToPossibleRoles = Collections.unmodifiableMap( - roleNameToPossibleRoles.values() - .stream() - .collect(Collectors.toMap(DiscoveryNodeRole::roleNameAbbreviation, Function.identity())) - ); - assert roleNameToPossibleRoles.size() == roleNameAbbreviationToPossibleRoles.size() : "roles by name [" - + roleNameToPossibleRoles - + "], roles by name abbreviation [" - + roleNameAbbreviationToPossibleRoles - + "]"; - roleMap = roleNameToPossibleRoles; - } - - /** - * Load the deprecated {@link DiscoveryNodeRole#MASTER_ROLE}. - * Master role is not added into BUILT_IN_ROLES, because {@link #setAdditionalRoles(Set)} check role name abbreviation duplication, - * and CLUSTER_MANAGER_ROLE has the same abbreviation name with MASTER_ROLE. 
- */ - public static void setDeprecatedMasterRole() { - final Map modifiableRoleMap = new HashMap<>(roleMap); - modifiableRoleMap.put(DiscoveryNodeRole.MASTER_ROLE.roleName(), DiscoveryNodeRole.MASTER_ROLE); - roleMap = Collections.unmodifiableMap(modifiableRoleMap); - } - - public static Set getPossibleRoleNames() { - return roleMap.keySet(); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(getId()); - builder.field("name", getName()); - builder.field("ephemeral_id", getEphemeralId()); - builder.field("transport_address", getAddress().toString()); - - builder.startObject("attributes"); - for (Map.Entry entry : attributes.entrySet()) { - builder.field(entry.getKey(), entry.getValue()); - } - builder.endObject(); - - builder.endObject(); - return builder; - } - -} diff --git a/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java index b2594459dd5e2..b49a286178ae1 100644 --- a/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/ProtobufDiscoveryNodes.java @@ -37,19 +37,19 @@ import java.util.stream.StreamSupport; /** - * This class holds all {@link ProtobufDiscoveryNode} in the cluster and provides convenience methods to + * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. * * @opensearch.internal */ -public class ProtobufDiscoveryNodes extends ProtobufAbstractDiffable implements Iterable { +public class ProtobufDiscoveryNodes extends ProtobufAbstractDiffable implements Iterable { public static final ProtobufDiscoveryNodes EMPTY_NODES = builder().build(); - private final ImmutableOpenMap nodes; - private final ImmutableOpenMap dataNodes; - private final ImmutableOpenMap clusterManagerNodes; - private final ImmutableOpenMap ingestNodes; + private final ImmutableOpenMap nodes; + private final ImmutableOpenMap dataNodes; + private final ImmutableOpenMap clusterManagerNodes; + private final ImmutableOpenMap ingestNodes; private final String clusterManagerNodeId; private final String localNodeId; @@ -59,10 +59,10 @@ public class ProtobufDiscoveryNodes extends ProtobufAbstractDiffable nodes, - ImmutableOpenMap dataNodes, - ImmutableOpenMap clusterManagerNodes, - ImmutableOpenMap ingestNodes, + ImmutableOpenMap nodes, + ImmutableOpenMap dataNodes, + ImmutableOpenMap clusterManagerNodes, + ImmutableOpenMap ingestNodes, String clusterManagerNodeId, String localNodeId, Version minNonClientNodeVersion, @@ -83,7 +83,7 @@ private ProtobufDiscoveryNodes( } @Override - public Iterator iterator() { + public Iterator iterator() { return nodes.valuesIt(); } @@ -122,7 +122,7 @@ public int getSize() { * * @return {@link Map} of the discovered nodes arranged by their ids */ - public ImmutableOpenMap getNodes() { + public ImmutableOpenMap getNodes() { return this.nodes; } @@ -131,7 +131,7 @@ public ImmutableOpenMap getNodes() { * * @return {@link Map} of the discovered data nodes arranged by their ids */ - public ImmutableOpenMap getDataNodes() { + public ImmutableOpenMap getDataNodes() { return this.dataNodes; } @@ -140,7 +140,7 @@ public ImmutableOpenMap getDataNodes() { * * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids */ - public ImmutableOpenMap getClusterManagerNodes() { + public ImmutableOpenMap 
getClusterManagerNodes() { return this.clusterManagerNodes; } @@ -151,14 +151,14 @@ public ImmutableOpenMap getClusterManagerNodes() * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodes()} */ @Deprecated - public ImmutableOpenMap getMasterNodes() { + public ImmutableOpenMap getMasterNodes() { return getClusterManagerNodes(); } /** * @return All the ingest nodes arranged by their ids */ - public ImmutableOpenMap getIngestNodes() { + public ImmutableOpenMap getIngestNodes() { return ingestNodes; } @@ -167,8 +167,8 @@ public ImmutableOpenMap getIngestNodes() { * * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids */ - public ImmutableOpenMap getClusterManagerAndDataNodes() { - ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(dataNodes); + public ImmutableOpenMap getClusterManagerAndDataNodes() { + ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(dataNodes); nodes.putAll(clusterManagerNodes); return nodes.build(); } @@ -180,7 +180,7 @@ public ImmutableOpenMap getClusterManagerAndDataN * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerAndDataNodes()} */ @Deprecated - public ImmutableOpenMap getMasterAndDataNodes() { + public ImmutableOpenMap getMasterAndDataNodes() { return getClusterManagerAndDataNodes(); } @@ -189,8 +189,8 @@ public ImmutableOpenMap getMasterAndDataNodes() { * * @return {@link Map} of the coordinating only nodes arranged by their ids */ - public ImmutableOpenMap getCoordinatingOnlyNodes() { - ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(this.nodes); + public ImmutableOpenMap getCoordinatingOnlyNodes() { + ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(this.nodes); nodes.removeAll(clusterManagerNodes.keys()); nodes.removeAll(dataNodes.keys()); nodes.removeAll(ingestNodes.keys()); @@ -200,7 +200,7 @@ public ImmutableOpenMap getCoordinatingOnlyNodes( /** * Returns a stream of all nodes, with cluster-manager nodes at the front */ - public Stream clusterManagersFirstStream() { + public Stream clusterManagersFirstStream() { return Stream.concat( StreamSupport.stream(clusterManagerNodes.spliterator(), false).map(cur -> cur.value), StreamSupport.stream(this.spliterator(), false).filter(n -> n.isClusterManagerNode() == false) @@ -213,7 +213,7 @@ public Stream clusterManagersFirstStream() { * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagersFirstStream()} */ @Deprecated - public Stream mastersFirstStream() { + public Stream mastersFirstStream() { return clusterManagersFirstStream(); } @@ -223,7 +223,7 @@ public Stream mastersFirstStream() { * @param nodeId id of the wanted node * @return wanted node if it exists. Otherwise null */ - public ProtobufDiscoveryNode get(String nodeId) { + public DiscoveryNode get(String nodeId) { return nodes.get(nodeId); } @@ -243,8 +243,8 @@ public boolean nodeExists(String nodeId) { * @param node of the node which existence should be verified * @return true if the node exists. Otherwise false */ - public boolean nodeExists(ProtobufDiscoveryNode node) { - ProtobufDiscoveryNode existing = nodes.get(node.getId()); + public boolean nodeExists(DiscoveryNode node) { + DiscoveryNode existing = nodes.get(node.getId()); return existing != null && existing.equals(node); } @@ -252,8 +252,8 @@ public boolean nodeExists(ProtobufDiscoveryNode node) { * Determine if the given node exists and has the right roles. 
Supported roles vary by version, and our local cluster state might * have come via an older cluster-manager, so the roles may differ even if the node is otherwise identical. */ - public boolean nodeExistsWithSameRoles(ProtobufDiscoveryNode discoveryNode) { - final ProtobufDiscoveryNode existing = nodes.get(discoveryNode.getId()); + public boolean nodeExistsWithSameRoles(DiscoveryNode discoveryNode) { + final DiscoveryNode existing = nodes.get(discoveryNode.getId()); return existing != null && existing.equals(discoveryNode) && existing.getRoles().equals(discoveryNode.getRoles()); } @@ -291,7 +291,7 @@ public String getLocalNodeId() { * * @return local node */ - public ProtobufDiscoveryNode getLocalNode() { + public DiscoveryNode getLocalNode() { return nodes.get(localNodeId); } @@ -299,7 +299,7 @@ public ProtobufDiscoveryNode getLocalNode() { * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node */ @Nullable - public ProtobufDiscoveryNode getClusterManagerNode() { + public DiscoveryNode getClusterManagerNode() { if (clusterManagerNodeId != null) { return nodes.get(clusterManagerNodeId); } @@ -313,7 +313,7 @@ public ProtobufDiscoveryNode getClusterManagerNode() { */ @Deprecated @Nullable - public ProtobufDiscoveryNode getMasterNode() { + public DiscoveryNode getMasterNode() { return getClusterManagerNode(); } @@ -323,9 +323,9 @@ public ProtobufDiscoveryNode getMasterNode() { * @param address {@link TransportAddress} of the wanted node * @return node identified by the given address or null if no such node exists */ - public ProtobufDiscoveryNode findByAddress(TransportAddress address) { - for (ObjectCursor cursor : nodes.values()) { - ProtobufDiscoveryNode node = cursor.value; + public DiscoveryNode findByAddress(TransportAddress address) { + for (ObjectCursor cursor : nodes.values()) { + DiscoveryNode node = cursor.value; if (node.getAddress().equals(address)) { return node; } @@ -380,7 +380,7 @@ public Version getMaxNodeVersion() { * @return discovered node matching the given id * @throws IllegalArgumentException if more than one node matches the request or no nodes have been resolved */ - public ProtobufDiscoveryNode resolveNode(String node) { + public DiscoveryNode resolveNode(String node) { String[] resolvedNodeIds = resolveNodes(node); if (resolvedNodeIds.length > 1) { throw new IllegalArgumentException( @@ -410,7 +410,7 @@ public ProtobufDiscoveryNode resolveNode(String node) { */ public String[] resolveNodes(String... nodes) { if (nodes == null || nodes.length == 0) { - return StreamSupport.stream(this.spliterator(), false).map(ProtobufDiscoveryNode::getId).toArray(String[]::new); + return StreamSupport.stream(this.spliterator(), false).map(DiscoveryNode::getId).toArray(String[]::new); } else { ObjectHashSet resolvedNodesIds = new ObjectHashSet<>(nodes.length); for (String nodeId : nodes) { @@ -431,7 +431,7 @@ public String[] resolveNodes(String... nodes) { } else if (nodeExists(nodeId)) { resolvedNodesIds.add(nodeId); } else { - for (ProtobufDiscoveryNode node : this) { + for (DiscoveryNode node : this) { if ("_all".equals(nodeId) || Regex.simpleMatch(nodeId, node.getName()) || Regex.simpleMatch(nodeId, node.getHostAddress()) @@ -461,14 +461,14 @@ public String[] resolveNodes(String... 
nodes) { } else { resolvedNodesIds.removeAll(ingestNodes.keys()); } - } else if (ProtobufDiscoveryNode.COORDINATING_ONLY.equals(matchAttrName)) { + } else if (DiscoveryNode.COORDINATING_ONLY.equals(matchAttrName)) { if (Booleans.parseBoolean(matchAttrValue, true)) { resolvedNodesIds.addAll(getCoordinatingOnlyNodes().keys()); } else { resolvedNodesIds.removeAll(getCoordinatingOnlyNodes().keys()); } } else { - for (ProtobufDiscoveryNode node : this) { + for (DiscoveryNode node : this) { for (DiscoveryNodeRole role : Sets.difference(node.getRoles(), DiscoveryNodeRole.BUILT_IN_ROLES)) { if (role.roleName().equals(matchAttrName)) { if (Booleans.parseBoolean(matchAttrValue, true)) { @@ -479,7 +479,7 @@ public String[] resolveNodes(String... nodes) { } } } - for (ProtobufDiscoveryNode node : this) { + for (DiscoveryNode node : this) { for (Map.Entry entry : node.getAttributes().entrySet()) { String attrName = entry.getKey(); String attrValue = entry.getValue(); @@ -496,7 +496,7 @@ public String[] resolveNodes(String... nodes) { } } - public ProtobufDiscoveryNodes newNode(ProtobufDiscoveryNode node) { + public ProtobufDiscoveryNodes newNode(DiscoveryNode node) { return new Builder(this).add(node).build(); } @@ -504,14 +504,14 @@ public ProtobufDiscoveryNodes newNode(ProtobufDiscoveryNode node) { * Returns the changes comparing this nodes to the provided nodes. */ public Delta delta(ProtobufDiscoveryNodes other) { - final List removed = new ArrayList<>(); - final List added = new ArrayList<>(); - for (ProtobufDiscoveryNode node : other) { + final List removed = new ArrayList<>(); + final List added = new ArrayList<>(); + for (DiscoveryNode node : other) { if (this.nodeExists(node) == false) { removed.add(node); } } - for (ProtobufDiscoveryNode node : this) { + for (DiscoveryNode node : this) { if (other.nodeExists(node) == false) { added.add(node); } @@ -530,7 +530,7 @@ public Delta delta(ProtobufDiscoveryNodes other) { public String toString() { StringBuilder sb = new StringBuilder(); sb.append("nodes: \n"); - for (ProtobufDiscoveryNode node : this) { + for (DiscoveryNode node : this) { sb.append(" ").append(node); if (node == getLocalNode()) { sb.append(", local"); @@ -552,18 +552,18 @@ public static class Delta { private final String localNodeId; @Nullable - private final ProtobufDiscoveryNode previousClusterManagerNode; + private final DiscoveryNode previousClusterManagerNode; @Nullable - private final ProtobufDiscoveryNode newClusterManagerNode; - private final List removed; - private final List added; + private final DiscoveryNode newClusterManagerNode; + private final List removed; + private final List added; private Delta( - @Nullable ProtobufDiscoveryNode previousClusterManagerNode, - @Nullable ProtobufDiscoveryNode newClusterManagerNode, + @Nullable DiscoveryNode previousClusterManagerNode, + @Nullable DiscoveryNode newClusterManagerNode, String localNodeId, - List removed, - List added + List removed, + List added ) { this.previousClusterManagerNode = previousClusterManagerNode; this.newClusterManagerNode = newClusterManagerNode; @@ -587,26 +587,26 @@ public boolean masterNodeChanged() { } @Nullable - public ProtobufDiscoveryNode previousClusterManagerNode() { + public DiscoveryNode previousClusterManagerNode() { return previousClusterManagerNode; } /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #previousClusterManagerNode()} */ @Deprecated @Nullable - public ProtobufDiscoveryNode previousMasterNode() { + public DiscoveryNode previousMasterNode() { 
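// deprecated alias retained for compatibility; delegates to the cluster-manager variant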
return previousClusterManagerNode(); } @Nullable - public ProtobufDiscoveryNode newClusterManagerNode() { + public DiscoveryNode newClusterManagerNode() { return newClusterManagerNode; } /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #newClusterManagerNode()} */ @Deprecated @Nullable - public ProtobufDiscoveryNode newMasterNode() { + public DiscoveryNode newMasterNode() { return newClusterManagerNode(); } @@ -614,7 +614,7 @@ public boolean removed() { return !removed.isEmpty(); } - public List removedNodes() { + public List removedNodes() { return removed; } @@ -622,7 +622,7 @@ public boolean added() { return !added.isEmpty(); } - public List addedNodes() { + public List addedNodes() { return added; } @@ -648,7 +648,7 @@ public String shortSummary() { if (added()) { final String addedNodesExceptLocalNode = addedNodes().stream() .filter(node -> node.getId().equals(localNodeId) == false) - .map(ProtobufDiscoveryNode::toString) + .map(DiscoveryNode::toString) .collect(Collectors.joining(",")); if (addedNodesExceptLocalNode.length() > 0) { // ignore ourselves when reporting on nodes being added @@ -671,12 +671,12 @@ public void writeTo(CodedOutputStream out) throws IOException { out.writeStringNoTag(clusterManagerNodeId); } out.writeInt32NoTag(nodes.size()); - for (ProtobufDiscoveryNode node : this) { + for (DiscoveryNode node : this) { node.writeTo(out); } } - public static ProtobufDiscoveryNodes readFrom(CodedInputStream in, ProtobufDiscoveryNode localNode) throws IOException { + public static ProtobufDiscoveryNodes readFrom(CodedInputStream in, DiscoveryNode localNode) throws IOException { Builder builder = new Builder(); if (in.readBool()) { builder.clusterManagerNodeId(in.readString()); @@ -686,7 +686,7 @@ public static ProtobufDiscoveryNodes readFrom(CodedInputStream in, ProtobufDisco } int size = in.readInt32(); for (int i = 0; i < size; i++) { - ProtobufDiscoveryNode node = new ProtobufDiscoveryNode(in); + DiscoveryNode node = new DiscoveryNode(in); if (localNode != null && node.getId().equals(localNode.getId())) { // reuse the same instance of our address and local node id for faster equality node = localNode; @@ -699,8 +699,7 @@ public static ProtobufDiscoveryNodes readFrom(CodedInputStream in, ProtobufDisco return builder.build(); } - public static ProtobufDiff readDiffFrom(CodedInputStream in, ProtobufDiscoveryNode localNode) - throws IOException { + public static ProtobufDiff readDiffFrom(CodedInputStream in, DiscoveryNode localNode) throws IOException { return ProtobufAbstractDiffable.readDiffFrom(in1 -> readFrom(in1, localNode), in); } @@ -719,7 +718,7 @@ public static Builder builder(ProtobufDiscoveryNodes nodes) { */ public static class Builder { - private final ImmutableOpenMap.Builder nodes; + private final ImmutableOpenMap.Builder nodes; private String clusterManagerNodeId; private String localNodeId; @@ -735,9 +734,9 @@ public Builder(ProtobufDiscoveryNodes nodes) { /** * adds a disco node to the builder. 
Will throw an {@link IllegalArgumentException} if - * the supplied node doesn't pass the pre-flight checks performed by {@link #validateAdd(ProtobufDiscoveryNode)} + * the supplied node doesn't pass the pre-flight checks performed by {@link #validateAdd(DiscoveryNode)} */ - public Builder add(ProtobufDiscoveryNode node) { + public Builder add(DiscoveryNode node) { final String preflight = validateAdd(node); if (preflight != null) { throw new IllegalArgumentException(preflight); @@ -753,11 +752,11 @@ public Builder add(ProtobufDiscoveryNode node) { * @return wanted node if it exists. Otherwise null */ @Nullable - public ProtobufDiscoveryNode get(String nodeId) { + public DiscoveryNode get(String nodeId) { return nodes.get(nodeId); } - private void putUnsafe(ProtobufDiscoveryNode node) { + private void putUnsafe(DiscoveryNode node) { nodes.put(node.getId(), node); } @@ -766,7 +765,7 @@ public Builder remove(String nodeId) { return this; } - public Builder remove(ProtobufDiscoveryNode node) { + public Builder remove(DiscoveryNode node) { if (node.equals(nodes.get(node.getId()))) { nodes.remove(node.getId()); } @@ -794,12 +793,12 @@ public Builder localNodeId(String localNodeId) { * * @return null if all is OK or an error message explaining why a node can not be added. * - * Note: if this method returns a non-null value, calling {@link #add(ProtobufDiscoveryNode)} will fail with an + * Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an * exception */ - private String validateAdd(ProtobufDiscoveryNode node) { - for (ObjectCursor cursor : nodes.values()) { - final ProtobufDiscoveryNode existingNode = cursor.value; + private String validateAdd(DiscoveryNode node) { + for (ObjectCursor cursor : nodes.values()) { + final DiscoveryNode existingNode = cursor.value; if (node.getAddress().equals(existingNode.getAddress()) && node.getId().equals(existingNode.getId()) == false) { return "can't add node " + node + ", found existing node " + existingNode + " with same address"; } @@ -815,14 +814,14 @@ private String validateAdd(ProtobufDiscoveryNode node) { } public ProtobufDiscoveryNodes build() { - ImmutableOpenMap.Builder dataNodesBuilder = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder clusterManagerNodesBuilder = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder ingestNodesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder dataNodesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder clusterManagerNodesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder ingestNodesBuilder = ImmutableOpenMap.builder(); Version minNodeVersion = null; Version maxNodeVersion = null; Version minNonClientNodeVersion = null; Version maxNonClientNodeVersion = null; - for (ObjectObjectCursor nodeEntry : nodes) { + for (ObjectObjectCursor nodeEntry : nodes) { if (nodeEntry.value.isDataNode()) { dataNodesBuilder.put(nodeEntry.key, nodeEntry.value); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java b/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java index 0bc434090b719..3965f1f72f668 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java +++ b/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java @@ -38,11 +38,17 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import 
org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.Objects; @@ -57,7 +63,7 @@ * * @opensearch.internal */ -public class AllocationId implements ToXContentObject, Writeable { +public class AllocationId implements ToXContentObject, Writeable, ProtobufWriteable { private static final String ID_KEY = "id"; private static final String RELOCATION_ID_KEY = "relocation_id"; @@ -94,12 +100,25 @@ public AllocationId build() { this.relocationId = in.readOptionalString(); } + AllocationId(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + this.id = in.readString(); + this.relocationId = protobufStreamInput.readOptionalString(); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.id); out.writeOptionalString(this.relocationId); } + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + out.writeStringNoTag(this.id); + protobufStreamOutput.writeOptionalString(this.relocationId); + } + private AllocationId(String id, String relocationId) { Objects.requireNonNull(id, "Argument [id] must be non-null"); this.id = id; diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index 781ca5bb2255a..bbb3ee61d719d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -32,6 +32,8 @@ package org.opensearch.cluster.routing; +import com.google.protobuf.CodedInputStream; + import org.apache.lucene.util.CollectionUtil; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -347,6 +349,18 @@ public static IndexRoutingTable readFrom(StreamInput in) throws IOException { return builder.build(); } + public static IndexRoutingTable readFrom(CodedInputStream in) throws IOException { + Index index = new Index(in); + Builder builder = new Builder(index); + + int size = in.readInt32(); + for (int i = 0; i < size; i++) { + builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); + } + + return builder.build(); + } + public static Diff readDiffFrom(StreamInput in) throws IOException { return readDiffFrom(IndexRoutingTable::readFrom, in); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index f9f57eeda4e98..423b841f0939d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -47,6 +47,8 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.ResponseCollectorService; +import com.google.protobuf.CodedInputStream; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -1050,6 +1052,20 
@@ public static IndexShardRoutingTable readFromThin(StreamInput in, Index index) t return builder.build(); } + public static IndexShardRoutingTable readFromThin(CodedInputStream in, Index index) throws IOException { + int iShardId = in.readInt32(); + ShardId shardId = new ShardId(index, iShardId); + Builder builder = new Builder(shardId); + + int size = in.readInt32(); + for (int i = 0; i < size; i++) { + ShardRouting shard = new ShardRouting(shardId, in); + builder.addShard(shard); + } + + return builder.build(); + } + public static void writeTo(IndexShardRoutingTable indexShard, StreamOutput out) throws IOException { indexShard.shardId().getIndex().writeTo(out); writeToThin(indexShard, out); diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index d6a67bc714689..9c8328f5a529c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -32,6 +32,8 @@ package org.opensearch.cluster.routing; +import com.google.protobuf.CodedInputStream; + import org.opensearch.cluster.Diff; import org.opensearch.cluster.Diffable; import org.opensearch.cluster.DiffableUtils; @@ -382,6 +384,18 @@ public static RoutingTable readFrom(StreamInput in) throws IOException { return builder.build(); } + public static RoutingTable readFrom(CodedInputStream in) throws IOException { + Builder builder = new Builder(); + builder.version = in.readInt64(); + int size = in.readInt32(); + for (int i = 0; i < size; i++) { + IndexRoutingTable index = IndexRoutingTable.readFrom(in); + builder.add(index); + } + + return builder.build(); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java index aa893ec735bac..f047ad4d60310 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java @@ -36,6 +36,9 @@ import org.opensearch.cluster.routing.RecoverySource.PeerRecoverySource; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,6 +47,9 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.Collections; import java.util.List; @@ -54,7 +60,7 @@ * * @opensearch.internal */ -public class ShardRouting implements Writeable, ToXContentObject { +public class ShardRouting implements Writeable, ToXContentObject, ProtobufWriteable { /** * Used if shard size is not available @@ -323,10 +329,35 @@ public ShardRouting(ShardId shardId, StreamInput in) throws IOException { targetRelocatingShard = initializeTargetRelocatingShard(); } + public ShardRouting(ShardId shardId, CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new 
ProtobufStreamInput(in); + this.shardId = shardId; + currentNodeId = protobufStreamInput.readOptionalString(); + relocatingNodeId = protobufStreamInput.readOptionalString(); + primary = in.readBool(); + state = ShardRoutingState.fromValue(in.readRawByte()); + recoverySource = null; + unassignedInfo = protobufStreamInput.readOptionalWriteable(UnassignedInfo::new); + allocationId = protobufStreamInput.readOptionalWriteable(AllocationId::new); + final long shardSize; + if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) { + shardSize = in.readInt64(); + } else { + shardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; + } + expectedShardSize = shardSize; + asList = Collections.singletonList(this); + targetRelocatingShard = initializeTargetRelocatingShard(); + } + public ShardRouting(StreamInput in) throws IOException { this(new ShardId(in), in); } + public ShardRouting(CodedInputStream in) throws IOException { + this(new ShardId(in), in); + } + /** * Writes shard information to {@link StreamOutput} without writing index name and shard id * @@ -348,12 +379,40 @@ public void writeToThin(StreamOutput out) throws IOException { } } + /** + * Writes shard information to without writing index name and shard id + * + * @param out to write shard information to + * @throws IOException if something happens during write + */ + public void writeToThin(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + protobufStreamOutput.writeOptionalString(currentNodeId); + protobufStreamOutput.writeOptionalString(relocatingNodeId); + out.writeBoolNoTag(primary); + out.write(state.value()); + // if (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) { + // recoverySource.writeTo(out); + // } + protobufStreamOutput.writeOptionalWriteable(unassignedInfo); + protobufStreamOutput.writeOptionalWriteable(allocationId); + if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) { + out.writeInt64NoTag(expectedShardSize); + } + } + @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); writeToThin(out); } + @Override + public void writeTo(CodedOutputStream out) throws IOException { + shardId.writeTo(out); + writeToThin(out); + } + public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource) { assert this.unassignedInfo != null : "can only update unassign info if they are already set"; assert this.unassignedInfo.isDelayed() || (unassignedInfo.isDelayed() == false) : "cannot transition from non-delayed to delayed"; diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index a4ecbadc34702..3de86101e3de3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -38,6 +38,9 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ 
-49,10 +52,14 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.time.Instant; import java.time.ZoneOffset; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Objects; @@ -64,7 +71,7 @@ * * @opensearch.internal */ -public final class UnassignedInfo implements ToXContentFragment, Writeable { +public final class UnassignedInfo implements ToXContentFragment, Writeable, ProtobufWriteable { public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(ZoneOffset.UTC); @@ -159,7 +166,7 @@ public enum Reason { * * @opensearch.internal */ - public enum AllocationStatus implements Writeable { + public enum AllocationStatus implements Writeable, ProtobufWriteable { /** * The shard was denied allocation to a node because the allocation deciders all returned a NO decision */ @@ -197,6 +204,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(id); } + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.write(id); + } + public static AllocationStatus readFrom(StreamInput in) throws IOException { byte id = in.readByte(); switch (id) { @@ -217,6 +229,26 @@ public static AllocationStatus readFrom(StreamInput in) throws IOException { } } + public static AllocationStatus readFrom(CodedInputStream in) throws IOException { + byte id = in.readRawByte(); + switch (id) { + case 0: + return DECIDERS_NO; + case 1: + return NO_VALID_SHARD_COPY; + case 2: + return DECIDERS_THROTTLED; + case 3: + return FETCHING_SHARD_DATA; + case 4: + return DELAYED_ALLOCATION; + case 5: + return NO_ATTEMPT; + default: + throw new IllegalArgumentException("Unknown AllocationStatus value [" + id + "]"); + } + } + public static AllocationStatus fromDecision(Decision.Type decision) { Objects.requireNonNull(decision); switch (decision) { @@ -316,6 +348,21 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } + public UnassignedInfo(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + this.reason = Reason.values()[(int) in.readRawByte()]; + this.unassignedTimeMillis = in.readInt64(); + // As System.nanoTime() cannot be compared across different JVMs, reset it to now. + // This means that in cluster-manager fail-over situations, elapsed delay time is forgotten. 
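+        // after resetting the nano clock, the remaining fields are read in exactly the order writeTo(CodedOutputStream) writes them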
+ this.unassignedTimeNanos = System.nanoTime(); + this.delayed = in.readBool(); + this.message = protobufStreamInput.readOptionalString(); + this.failure = protobufStreamInput.readException(); + this.failedAllocations = in.readInt32(); + this.lastAllocationStatus = AllocationStatus.readFrom(in); + this.failedNodeIds = protobufStreamInput.readCollection(CodedInputStream::readString, HashSet::new, Collections.emptySet()); + } + public void writeTo(StreamOutput out) throws IOException { out.writeByte((byte) reason.ordinal()); out.writeLong(unassignedTimeMillis); @@ -328,6 +375,19 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(failedNodeIds, StreamOutput::writeString); } + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + out.write((byte) reason.ordinal()); + out.writeInt64NoTag(unassignedTimeMillis); + // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs + out.writeBoolNoTag(delayed); + protobufStreamOutput.writeOptionalString(message); + protobufStreamOutput.writeException(failure); + out.writeInt32NoTag(failedAllocations); + lastAllocationStatus.writeTo(out); + protobufStreamOutput.writeCollection(failedNodeIds, CodedOutputStream::writeStringNoTag); + } + /** * Returns the number of previously failed allocations of this shard. */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java index 939feb89b6054..bb03e04568116 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.service; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ProtobufClusterState; import java.util.function.Supplier; @@ -48,6 +49,12 @@ public interface ClusterApplier { */ void setInitialState(ClusterState initialState); + /** + * Sets the initial state for this applier. Should only be called once. 
+     * @param initialState the initial state to set
+     */
+    void setInitialProtobufState(ProtobufClusterState initialState);
+
     /**
      * Method to invoke when a new cluster state is available to be applied
      *
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
index 57db883a337e8..2b3067b804820 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
@@ -153,6 +153,15 @@ public void setInitialState(ClusterState initialState) {
         state.set(initialState);
     }
 
+    @Override
+    public void setInitialProtobufState(ProtobufClusterState initialState) {
+        if (lifecycle.started()) {
+            throw new IllegalStateException("can't set initial state when started");
+        }
+        assert protobufState.get() == null : "state is already set";
+        protobufState.set(initialState);
+    }
+
     @Override
     protected synchronized void doStart() {
         Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
diff --git a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java
index 7742e4578744c..4caed83b07941 100644
--- a/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java
+++ b/server/src/main/java/org/opensearch/common/io/stream/ProtobufStreamInput.java
@@ -290,7 +290,7 @@ public <T> List<T> readList(final ProtobufWriteable.Reader<T> reader) throws IOException {
     /**
      * Reads a collection of objects
      */
-    private <T, C extends Collection<? super T>> C readCollection(ProtobufWriteable.Reader<T> reader, IntFunction<C> constructor, C empty)
+    public <T, C extends Collection<? super T>> C readCollection(ProtobufWriteable.Reader<T> reader, IntFunction<C> constructor, C empty)
         throws IOException {
         int count = readArraySize();
         if (count == 0) {
diff --git a/server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java b/server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java
index 4b1be0914747d..a2d9aae67db6c 100644
--- a/server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java
+++ b/server/src/main/java/org/opensearch/common/logging/ProtobufNodeAndClusterIdStateListener.java
@@ -36,7 +36,10 @@ public static void getAndSetNodeIdAndClusterId(ClusterService clusterService, ThreadContext threadContext) {
         ProtobufClusterState clusterState = clusterService.protobufState();
         ProtobufClusterStateObserver observer = new ProtobufClusterStateObserver(clusterState, clusterService, null, logger, threadContext);
 
-        observer.waitForNextChange(new ProtobufNodeAndClusterIdStateListener(), ProtobufNodeAndClusterIdStateListener::isNodeAndClusterIdPresent);
+        observer.waitForNextChange(
+            new ProtobufNodeAndClusterIdStateListener(),
+            ProtobufNodeAndClusterIdStateListener::isNodeAndClusterIdPresent
+        );
     }
 
     private static boolean isNodeAndClusterIdPresent(ProtobufClusterState clusterState) {
diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java
index c9ed9d59cda05..ba348e89ed73b 100644
--- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java
+++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java
@@ -304,9 +304,9 @@ public AsyncSender interceptSender(AsyncSender sender) {
         }
     }
 
-    /**
-     * Registers a new {@link TransportInterceptor}
-     */
+    /**
+     * Registers a new {@link ProtobufTransportInterceptor}
+     */
     private void registerProtobufTransportInterceptor(ProtobufTransportInterceptor interceptor) {
         this.protobufTransportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null"));
     }
diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java
index 4308e1c3e98fa..dd5502054684a 100644
--- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java
+++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java
@@ -233,7 +233,6 @@ public boolean registerDynamicSetting(Setting<?> setting) {
      * the setting during startup.
      */
     private void registerSetting(Setting<?> setting) {
-        System.out.println("Registering setting: " + setting.getKey());
         if (setting.isFiltered()) {
             if (settingsFilterPattern.contains(setting.getKey()) == false) {
                 registerSettingsFilter(setting.getKey());
diff --git a/server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java b/server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java
index 97de0d42b5592..96ca6719491f7 100644
--- a/server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java
+++ b/server/src/main/java/org/opensearch/common/transport/ProtobufBoundTransportAddress.java
@@ -16,6 +16,7 @@
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
 import org.opensearch.common.io.stream.ProtobufWriteable;
+import org.opensearch.common.network.InetAddresses;
 
 import java.io.IOException;
 
@@ -65,4 +66,27 @@ public void writeTo(CodedOutputStream out) throws IOException {
         }
         publishAddress.writeTo(out);
     }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder("publish_address {");
+        String hostString = publishAddress.address().getHostString();
+        String publishAddressString = publishAddress.toString();
+        if (InetAddresses.isInetAddress(hostString) == false) {
+            publishAddressString = hostString + '/' + publishAddress.toString();
+        }
+        builder.append(publishAddressString);
+        builder.append("}, bound_addresses ");
+        boolean firstAdded = false;
+        for (ProtobufTransportAddress address : boundAddresses) {
+            if (firstAdded) {
+                builder.append(", ");
+            } else {
+                firstAdded = true;
+            }
+
+            builder.append("{").append(address).append("}");
+        }
+        return builder.toString();
+    }
 }
diff --git a/server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java b/server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java
index 274c8404e11af..aa34a2f0ec99f 100644
--- a/server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java
+++ b/server/src/main/java/org/opensearch/common/transport/ProtobufTransportAddress.java
@@ -16,6 +16,9 @@
 import com.google.protobuf.CodedOutputStream;
 import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.common.network.NetworkAddress;
+import org.opensearch.core.xcontent.ToXContent.Params;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -27,7 +30,7 @@
  *
  * @opensearch.internal
  */
-public final class ProtobufTransportAddress implements ProtobufWriteable {
+public final class ProtobufTransportAddress implements ProtobufWriteable, ToXContentFragment {
 
     /**
      * A non-routeable v4 meta transport address that can be used for
@@ -105,4 +108,27
@@ public int getPort() { public InetSocketAddress address() { return this.address; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ProtobufTransportAddress address1 = (ProtobufTransportAddress) o; + return address.equals(address1.address); + } + + @Override + public int hashCode() { + return address != null ? address.hashCode() : 0; + } + + @Override + public String toString() { + return NetworkAddress.format(address); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/opensearch/discovery/Discovery.java b/server/src/main/java/org/opensearch/discovery/Discovery.java index 9d6807b6522c9..9418632d52e1f 100644 --- a/server/src/main/java/org/opensearch/discovery/Discovery.java +++ b/server/src/main/java/org/opensearch/discovery/Discovery.java @@ -49,6 +49,11 @@ public interface Discovery extends LifecycleComponent, ClusterStatePublisher { */ DiscoveryStats stats(); + /** + * @return stats about the discovery + */ + ProtobufDiscoveryStats protobufStats(); + /** * Triggers the first join cycle */ diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 1d967d9fb135a..f82f380f5858b 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -46,6 +46,8 @@ import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.transport.NetworkExceptionHelper; import org.opensearch.common.transport.PortsRange; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.util.BigArrays; @@ -155,11 +157,35 @@ public HttpInfo info() { return new HttpInfo(boundTransportAddress, maxContentLength.getBytes()); } + @Override + public ProtobufHttpInfo protobufInfo() { + BoundTransportAddress boundTransportAddress = boundAddress(); + if (boundTransportAddress == null) { + return null; + } + TransportAddress[] transportAddress = boundTransportAddress.boundAddresses(); + TransportAddress publishAddress = boundTransportAddress.publishAddress(); + ProtobufTransportAddress[] protobufTransportAddresses = new ProtobufTransportAddress[transportAddress.length]; + for (int i = 0; i < transportAddress.length; i++) { + protobufTransportAddresses[i] = new ProtobufTransportAddress(transportAddress[i].address()); + } + ProtobufBoundTransportAddress protobufBoundTransportAddress = new ProtobufBoundTransportAddress( + protobufTransportAddresses, + new ProtobufTransportAddress(publishAddress.address()) + ); + return new ProtobufHttpInfo(protobufBoundTransportAddress, maxContentLength.getBytes()); + } + @Override public HttpStats stats() { return new HttpStats(httpChannels.size(), totalChannelsAccepted.get()); } + @Override + public ProtobufHttpStats protobufStats() { + return new ProtobufHttpStats(httpChannels.size(), totalChannelsAccepted.get()); + } + protected void bindServer() { // Bind and start to accept incoming connections. 
InetAddress hostAddresses[]; @@ -368,7 +394,6 @@ void dispatchRequest(final RestRequest restRequest, final RestChannel channel, f } private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel, final Exception exception) { - System.out.println("HttpServerTransport.handleIncomingRequest"); if (exception == null) { HttpResponse earlyResponse = corsHandler.handleInbound(httpRequest); if (earlyResponse != null) { @@ -442,7 +467,6 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan channel = innerChannel; } - System.out.println("RestRequest " + restRequest); dispatchRequest(restRequest, channel, badRequestCause); } diff --git a/server/src/main/java/org/opensearch/http/HttpServerTransport.java b/server/src/main/java/org/opensearch/http/HttpServerTransport.java index 6549f0786fcda..0d4ac79535c25 100644 --- a/server/src/main/java/org/opensearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/HttpServerTransport.java @@ -35,6 +35,7 @@ import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.node.ProtobufReportingService; import org.opensearch.node.ReportingService; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; @@ -44,7 +45,7 @@ * * @opensearch.internal */ -public interface HttpServerTransport extends LifecycleComponent, ReportingService { +public interface HttpServerTransport extends LifecycleComponent, ReportingService, ProtobufReportingService { String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; @@ -53,8 +54,13 @@ public interface HttpServerTransport extends LifecycleComponent, ReportingServic @Override HttpInfo info(); + @Override + ProtobufHttpInfo protobufInfo(); + HttpStats stats(); + ProtobufHttpStats protobufStats(); + /** * Dispatches HTTP requests. */ diff --git a/server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java b/server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java index 612c98e7df7e8..fb893000f68ff 100644 --- a/server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java +++ b/server/src/main/java/org/opensearch/http/ProtobufHttpInfo.java @@ -15,8 +15,13 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; + +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.network.InetAddresses; import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ProtobufReportingService; import java.io.IOException; @@ -28,6 +33,11 @@ */ public class ProtobufHttpInfo implements ProtobufReportingService.ProtobufInfo { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ProtobufHttpInfo.class); + + /** Deprecated property, just here for deprecation logging in 7.x. 
*/ + private static final boolean CNAME_IN_PUBLISH_HOST = System.getProperty("opensearch.http.cname_in_publish_address") != null; + private final ProtobufBoundTransportAddress address; private final long maxContentLength; @@ -61,4 +71,35 @@ public ByteSizeValue maxContentLength() { public ByteSizeValue getMaxContentLength() { return maxContentLength(); } + + static final class Fields { + static final String HTTP = "http"; + static final String BOUND_ADDRESS = "bound_address"; + static final String PUBLISH_ADDRESS = "publish_address"; + static final String MAX_CONTENT_LENGTH = "max_content_length"; + static final String MAX_CONTENT_LENGTH_IN_BYTES = "max_content_length_in_bytes"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.HTTP); + builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses()); + ProtobufTransportAddress publishAddress = address.publishAddress(); + String publishAddressString = publishAddress.toString(); + String hostString = publishAddress.address().getHostString(); + if (CNAME_IN_PUBLISH_HOST) { + deprecationLogger.deprecate( + "cname_in_publish_address", + "opensearch.http.cname_in_publish_address system property is deprecated and no longer affects http.publish_address " + + "formatting. Remove this property to get rid of this deprecation warning." + ); + } + if (InetAddresses.isInetAddress(hostString) == false) { + publishAddressString = hostString + '/' + publishAddress.toString(); + } + builder.field(Fields.PUBLISH_ADDRESS, publishAddressString); + builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength()); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java b/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java index c3c552b5f732d..20724474e5fb5 100644 --- a/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java +++ b/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java @@ -52,6 +52,10 @@ public RequestCacheStats stats() { return new RequestCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count()); } + public ProtobufRequestCacheStats protobufStats() { + return new ProtobufRequestCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count()); + } + public void onHit() { hitCount.inc(); } diff --git a/server/src/main/java/org/opensearch/index/engine/CommitStats.java b/server/src/main/java/org/opensearch/index/engine/CommitStats.java index 2c78f675c5154..d52342742a442 100644 --- a/server/src/main/java/org/opensearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/opensearch/index/engine/CommitStats.java @@ -33,6 +33,9 @@ import org.apache.lucene.index.SegmentInfos; import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -40,6 +43,9 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import com.google.protobuf.CodedInputStream; +import 
com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;
 import java.util.Base64;
 import java.util.Map;
@@ -49,7 +55,7 @@
  *
  * @opensearch.internal
  */
-public final class CommitStats implements Writeable, ToXContentFragment {
+public final class CommitStats implements Writeable, ToXContentFragment, ProtobufWriteable {
 
     private final Map<String, String> userData;
     private final long generation;
@@ -76,10 +82,27 @@ public CommitStats(SegmentInfos segmentInfos) {
         numDocs = in.readInt();
     }
 
+    CommitStats(CodedInputStream in) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
+        MapBuilder<String, String> builder = MapBuilder.newMapBuilder();
+        for (int i = in.readInt32(); i > 0; i--) {
+            builder.put(in.readString(), in.readString());
+        }
+        userData = builder.immutableMap();
+        generation = in.readInt64();
+        id = protobufStreamInput.readOptionalString();
+        numDocs = in.readInt32();
+    }
+
     public static CommitStats readOptionalCommitStatsFrom(StreamInput in) throws IOException {
         return in.readOptionalWriteable(CommitStats::new);
     }
 
+    public static CommitStats readOptionalCommitStatsFromProtobuf(CodedInputStream in) throws IOException {
+        ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in);
+        return protobufStreamInput.readOptionalWriteable(CommitStats::new);
+    }
+
     public Map<String, String> getUserData() {
         return userData;
     }
@@ -112,6 +135,19 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeInt(numDocs);
     }
 
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out);
+        out.writeInt32NoTag(userData.size());
+        for (Map.Entry<String, String> entry : userData.entrySet()) {
+            out.writeStringNoTag(entry.getKey());
+            out.writeStringNoTag(entry.getValue());
+        }
+        out.writeInt64NoTag(generation);
+        protobufStreamOutput.writeOptionalString(id);
+        out.writeInt32NoTag(numDocs);
+    }
+
     /**
      * Fields used for parsing and toXContent
      *
diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java
index 74c9c25dc4c1f..1c878a4a78238 100644
--- a/server/src/main/java/org/opensearch/index/engine/Engine.java
+++ b/server/src/main/java/org/opensearch/index/engine/Engine.java
@@ -81,10 +81,12 @@
 import org.opensearch.index.mapper.ParsedDocument;
 import org.opensearch.index.mapper.SeqNoFieldMapper;
 import org.opensearch.index.merge.MergeStats;
+import org.opensearch.index.merge.ProtobufMergeStats;
 import org.opensearch.index.seqno.SeqNoStats;
 import org.opensearch.index.seqno.SequenceNumbers;
 import org.opensearch.index.shard.DocsStats;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.index.shard.ProtobufDocsStats;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.index.translog.TranslogManager;
@@ -205,6 +207,10 @@ public MergeStats getMergeStats() {
         return new MergeStats();
     }
 
+    public ProtobufMergeStats getProtobufMergeStats() {
+        return new ProtobufMergeStats();
+    }
+
     /** returns the history uuid for the engine */
     public abstract String getHistoryUUID();
 
@@ -262,6 +268,41 @@ protected final DocsStats docsStats(IndexReader indexReader) {
         return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
     }
 
+    /**
+     * Returns the {@link ProtobufDocsStats} for this engine
+     */
+    public ProtobufDocsStats protobufDocsStats() {
+        // we calculate the doc stats based on the internal searcher that is more up-to-date and not subject
+        // to external refreshes. For instance we don't refresh an external searcher if we flush and indices with
+        // index.refresh_interval=-1 won't see any doc stats updates at all. This change will give more accurate statistics
+        // when indexing but not refreshing in general. Yet, if a refresh happens the internal searcher is refreshed as well so we are
+        // safe here.
+        try (Searcher searcher = acquireSearcher("docStats", SearcherScope.INTERNAL)) {
+            return protobufDocsStats(searcher.getIndexReader());
+        }
+    }
+
+    protected final ProtobufDocsStats protobufDocsStats(IndexReader indexReader) {
+        long numDocs = 0;
+        long numDeletedDocs = 0;
+        long sizeInBytes = 0;
+        // we don't wait for pending refreshes here since it's a stats call; instead we mark it as accessed only which will cause
+        // the next scheduled refresh to go through and refresh the stats as well
+        for (LeafReaderContext readerContext : indexReader.leaves()) {
+            // we go on the segment level here to get accurate numbers
+            final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader());
+            SegmentCommitInfo info = segmentReader.getSegmentInfo();
+            numDocs += readerContext.reader().numDocs();
+            numDeletedDocs += readerContext.reader().numDeletedDocs();
+            try {
+                sizeInBytes += info.sizeInBytes();
+            } catch (IOException e) {
+                logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
+            }
+        }
+        return new ProtobufDocsStats(numDocs, numDeletedDocs, sizeInBytes);
+    }
+
     /**
      * Performs the pre-closing checks on the {@link Engine}.
     *
@@ -926,6 +967,34 @@ public SegmentsStats segmentsStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) {
         return stats;
     }
 
+    /**
+     * Global stats on segments.
+     */
+    public ProtobufSegmentsStats protobufSegmentsStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) {
+        ensureOpen();
+        Set<String> segmentName = new HashSet<>();
+        ProtobufSegmentsStats stats = new ProtobufSegmentsStats();
+        try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.INTERNAL)) {
+            for (LeafReaderContext ctx : searcher.getIndexReader().getContext().leaves()) {
+                SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
+                protobufFillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
+                segmentName.add(segmentReader.getSegmentName());
+            }
+        }
+
+        try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.EXTERNAL)) {
+            for (LeafReaderContext ctx : searcher.getIndexReader().getContext().leaves()) {
+                SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
+                if (segmentName.contains(segmentReader.getSegmentName()) == false) {
+                    protobufFillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
+                }
+            }
+        }
+        stats.addVersionMapMemoryInBytes(0);
+        stats.addIndexWriterMemoryInBytes(0);
+        return stats;
+    }
+
     protected TranslogDeletionPolicy getTranslogDeletionPolicy(EngineConfig engineConfig) {
         TranslogDeletionPolicy customTranslogDeletionPolicy = null;
         if (engineConfig.getCustomTranslogDeletionPolicyFactory() != null) {
@@ -950,6 +1019,14 @@ protected void fillSegmentStats(SegmentReader segmentReader, boolean includeSegmentFileSizes, SegmentsStats stats) {
         }
     }
 
+    protected void protobufFillSegmentStats(SegmentReader segmentReader, boolean includeSegmentFileSizes, ProtobufSegmentsStats stats) {
+        stats.add(1);
+        if (includeSegmentFileSizes) {
+            // TODO: consider moving this to StoreStats
+            stats.addFileSizes(getSegmentFileSizes(segmentReader));
+        }
+    }
+
     private Map<String, Long> getSegmentFileSizes(SegmentReader segmentReader) {
         Directory directory = null;
         SegmentCommitInfo segmentCommitInfo = 
segmentReader.getSegmentInfo(); diff --git a/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java index 138b417571784..b786e625228a6 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java @@ -34,6 +34,7 @@ import org.apache.lucene.util.Accountable; import org.opensearch.common.FieldMemoryStats; +import org.opensearch.common.ProtobufFieldMemoryStats; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.regex.Regex; import org.opensearch.common.util.CollectionUtils; @@ -72,6 +73,23 @@ public FieldDataStats stats(String... fields) { ); } + public ProtobufFieldDataStats protobufStats(String... fields) { + ObjectLongHashMap fieldTotals = null; + if (CollectionUtils.isEmpty(fields) == false) { + fieldTotals = new ObjectLongHashMap<>(); + for (Map.Entry entry : perFieldTotals.entrySet()) { + if (Regex.simpleMatch(fields, entry.getKey())) { + fieldTotals.put(entry.getKey(), entry.getValue().count()); + } + } + } + return new ProtobufFieldDataStats( + totalMetric.count(), + evictionsMetric.count(), + fieldTotals == null ? null : new ProtobufFieldMemoryStats(fieldTotals) + ); + } + @Override public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) { totalMetric.inc(ramUsage.ramBytesUsed()); diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java index 8f81e704ef2d4..86df8a72ab18c 100644 --- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java @@ -113,6 +113,16 @@ public GetStats stats() { ); } + public ProtobufGetStats protobufStats() { + return new ProtobufGetStats( + existsMetric.count(), + TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), + missingMetric.count(), + TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), + currentMetric.count() + ); + } + public GetResult get( String id, String[] gFields, diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 6d0eb3a5949ca..a1a96a367e4f2 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -84,6 +84,31 @@ public SearchStats stats(String... groups) { return new SearchStats(total, openContexts.count(), groupsSt); } + /** + * Returns the stats, including group specific stats. If the groups are null/0 length, then nothing + * is returned for them. If they are set, then only groups provided will be returned, or + * {@code _all} for all groups. + */ + public ProtobufSearchStats protobufStats(String... 
groups) { + ProtobufSearchStats.Stats total = totalStats.protobufStats(); + Map groupsSt = null; + if (CollectionUtils.isEmpty(groups) == false) { + groupsSt = new HashMap<>(groupsStats.size()); + if (groups.length == 1 && groups[0].equals("_all")) { + for (Map.Entry entry : groupsStats.entrySet()) { + groupsSt.put(entry.getKey(), entry.getValue().protobufStats()); + } + } else { + for (Map.Entry entry : groupsStats.entrySet()) { + if (Regex.simpleMatch(groups, entry.getKey())) { + groupsSt.put(entry.getKey(), entry.getValue().protobufStats()); + } + } + } + } + return new ProtobufSearchStats(total, openContexts.count(), groupsSt); + } + @Override public void onPreQueryPhase(SearchContext searchContext) { computeStats(searchContext, statsHolder -> { @@ -242,5 +267,25 @@ SearchStats.Stats stats() { suggestCurrent.count() ); } + + ProtobufSearchStats.Stats protobufStats() { + return new ProtobufSearchStats.Stats( + queryMetric.count(), + TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), + queryCurrent.count(), + fetchMetric.count(), + TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), + fetchCurrent.count(), + scrollMetric.count(), + TimeUnit.MICROSECONDS.toMillis(scrollMetric.sum()), + scrollCurrent.count(), + pitMetric.count(), + TimeUnit.MICROSECONDS.toMillis(pitMetric.sum()), + pitCurrent.count(), + suggestMetric.count(), + TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), + suggestCurrent.count() + ); + } } } diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java index 5d4483e4a2930..27e4aa2285e6e 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java @@ -33,6 +33,7 @@ package org.opensearch.index.seqno; import org.opensearch.core.ParseField; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -40,6 +41,10 @@ import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; + +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + import org.opensearch.core.xcontent.XContent; import java.io.IOException; @@ -53,7 +58,7 @@ * * @opensearch.internal */ -public final class RetentionLease implements ToXContentObject, Writeable { +public final class RetentionLease implements ToXContentObject, Writeable, ProtobufWriteable { private final String id; @@ -156,6 +161,33 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(source); } + /** + * Constructs a new retention lease from a stream. The retention lease should have been written. + * + * @param in the stream to construct the retention lease from + * @throws IOException if an I/O exception occurs reading from the stream + */ + public RetentionLease(final CodedInputStream in) throws IOException { + id = in.readString(); + retainingSequenceNumber = in.readInt64(); + timestamp = in.readInt64(); + source = in.readString(); + } + + /** + * Writes a retention lease to a stream in a manner suitable for later reconstruction. 
+     *
+     * @param out the stream to write the retention lease to
+     * @throws IOException if an I/O exception occurs writing to the stream
+     */
+    @Override
+    public void writeTo(final CodedOutputStream out) throws IOException {
+        out.writeStringNoTag(id);
+        out.writeInt64NoTag(retainingSequenceNumber);
+        out.writeInt64NoTag(timestamp);
+        out.writeStringNoTag(source);
+    }
+
     private static final ParseField ID_FIELD = new ParseField("id");
     private static final ParseField RETAINING_SEQUENCE_NUMBER_FIELD = new ParseField("retaining_sequence_number");
     private static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp");
diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java
index a1ed615b83a14..845900a86d31d 100644
--- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java
+++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java
@@ -32,11 +32,16 @@
 
 package org.opensearch.index.seqno;
 
+import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
 import org.opensearch.core.xcontent.XContent;
 
 import java.io.IOException;
@@ -47,7 +52,7 @@
  *
  * @opensearch.internal
  */
-public final class RetentionLeaseStats implements ToXContentFragment, Writeable {
+public final class RetentionLeaseStats implements ToXContentFragment, Writeable, ProtobufWriteable {
 
     private final RetentionLeases retentionLeases;
 
@@ -92,6 +97,27 @@ public void writeTo(final StreamOutput out) throws IOException {
         retentionLeases.writeTo(out);
     }
 
+    /**
+     * Constructs a new retention lease stats object from a stream. The retention lease stats should have been written.
+     *
+     * @param in the stream to construct the retention lease stats from
+     * @throws IOException if an I/O exception occurs reading from the stream
+     */
+    public RetentionLeaseStats(final CodedInputStream in) throws IOException {
+        retentionLeases = new RetentionLeases(in);
+    }
+
+    /**
+     * Writes a retention lease stats object to a stream in a manner suitable for later reconstruction.
+     *
+     * @param out the stream to write the retention lease stats to
+     * @throws IOException if an I/O exception occurs writing to the stream
+     */
+    @Override
+    public void writeTo(final CodedOutputStream out) throws IOException {
+        retentionLeases.writeTo(out);
+    }
+
     /**
      * Converts the retention lease stats to {@link XContent} using the specified builder and params.
* diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java index a764ca3e7a581..753ab4eb81d2d 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java @@ -33,6 +33,9 @@ package org.opensearch.index.seqno; import org.opensearch.core.ParseField; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,6 +47,9 @@ import org.opensearch.core.xcontent.XContent; import org.opensearch.gateway.MetadataStateFormat; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.Collection; import java.util.Collections; @@ -59,7 +65,7 @@ * * @opensearch.internal */ -public class RetentionLeases implements ToXContentFragment, Writeable { +public class RetentionLeases implements ToXContentFragment, Writeable, ProtobufWriteable { private final long primaryTerm; @@ -193,6 +199,33 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeCollection(leases.values()); } + /** + * Constructs a new retention lease collection from a stream. The retention lease collection should have been written. + * + * @param in the stream to construct the retention lease collection from + * @throws IOException if an I/O exception occurs reading from the stream + */ + public RetentionLeases(final CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + primaryTerm = in.readInt64(); + version = in.readInt64(); + leases = Collections.unmodifiableMap(toMap(protobufStreamInput.readList(RetentionLease::new))); + } + + /** + * Writes a retention lease collection to a stream in a manner suitable for later reconstruction. 
+ * + * @param out the stream to write the retention lease collection to + * @throws IOException if an I/O exception occurs writing to the stream + */ + @Override + public void writeTo(final CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + out.writeInt64NoTag(primaryTerm); + out.writeInt64NoTag(version); + protobufStreamOutput.writeCollection(leases.values(), (o, v) -> v.writeTo(o)); + } + private static final ParseField PRIMARY_TERM_FIELD = new ParseField("primary_term"); private static final ParseField VERSION_FIELD = new ParseField("version"); private static final ParseField LEASES_FIELD = new ParseField("leases"); diff --git a/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java b/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java index be7888ada2801..01a05263851e7 100644 --- a/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java +++ b/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java @@ -32,12 +32,16 @@ package org.opensearch.index.seqno; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.Objects; @@ -46,7 +50,7 @@ * * @opensearch.internal */ -public class SeqNoStats implements ToXContentFragment, Writeable { +public class SeqNoStats implements ToXContentFragment, Writeable, ProtobufWriteable { private static final String SEQ_NO = "seq_no"; private static final String MAX_SEQ_NO = "max_seq_no"; @@ -70,6 +74,10 @@ public SeqNoStats(StreamInput in) throws IOException { this(in.readZLong(), in.readZLong(), in.readZLong()); } + public SeqNoStats(CodedInputStream in) throws IOException { + this(in.readInt64(), in.readInt64(), in.readInt64()); + } + /** the maximum sequence number seen so far */ public long getMaxSeqNo() { return maxSeqNo; @@ -91,6 +99,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(globalCheckpoint); } + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt64NoTag(maxSeqNo); + out.writeInt64NoTag(localCheckpoint); + out.writeInt64NoTag(globalCheckpoint); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(SEQ_NO); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 2b85193275a13..316f6c37d1cd3 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -123,15 +123,19 @@ import org.opensearch.index.engine.EngineException; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.NRTReplicationEngine; +import org.opensearch.index.engine.ProtobufSegmentsStats; import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.engine.RefreshFailedEngineException; import org.opensearch.index.engine.SafeCommitInfo; import org.opensearch.index.engine.Segment; import org.opensearch.index.engine.SegmentsStats; import org.opensearch.index.fielddata.FieldDataStats; +import 
org.opensearch.index.fielddata.ProtobufFieldDataStats; import org.opensearch.index.fielddata.ShardFieldData; import org.opensearch.index.flush.FlushStats; +import org.opensearch.index.flush.ProtobufFlushStats; import org.opensearch.index.get.GetStats; +import org.opensearch.index.get.ProtobufGetStats; import org.opensearch.index.get.ShardGetService; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.DocumentMapperForType; @@ -143,8 +147,12 @@ import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.mapper.Uid; import org.opensearch.index.merge.MergeStats; +import org.opensearch.index.merge.ProtobufMergeStats; +import org.opensearch.index.recovery.ProtobufRecoveryStats; import org.opensearch.index.recovery.RecoveryStats; +import org.opensearch.index.refresh.ProtobufRefreshStats; import org.opensearch.index.refresh.RefreshStats; +import org.opensearch.index.search.stats.ProtobufSearchStats; import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.search.stats.ShardSearchStats; @@ -157,6 +165,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.index.similarity.SimilarityService; +import org.opensearch.index.store.ProtobufStoreStats; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.Store.MetadataSnapshot; @@ -170,6 +179,7 @@ import org.opensearch.index.translog.TranslogFactory; import org.opensearch.index.translog.TranslogRecoveryRunner; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.index.warmer.ProtobufWarmerStats; import org.opensearch.index.warmer.ShardIndexWarmerService; import org.opensearch.index.warmer.WarmerStats; import org.opensearch.indices.IndexingMemoryController; @@ -290,6 +300,7 @@ Runnable getGlobalCheckpointSyncer() { private volatile RecoveryState recoveryState; private final RecoveryStats recoveryStats = new RecoveryStats(); + private final ProtobufRecoveryStats protobufRecoveryStats = new ProtobufRecoveryStats(); private final MeanMetric refreshMetric = new MeanMetric(); private final MeanMetric externalRefreshMetric = new MeanMetric(); private final MeanMetric flushMetric = new MeanMetric(); @@ -1292,15 +1303,35 @@ public RefreshStats refreshStats() { ); } + public ProtobufRefreshStats protobufRefreshStats() { + int listeners = refreshListeners.pendingCount(); + return new ProtobufRefreshStats( + refreshMetric.count(), + TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()), + externalRefreshMetric.count(), + TimeUnit.NANOSECONDS.toMillis(externalRefreshMetric.sum()), + listeners + ); + } + public FlushStats flushStats() { return new FlushStats(flushMetric.count(), periodicFlushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum())); } + public ProtobufFlushStats protobufFlushStats() { + return new ProtobufFlushStats(flushMetric.count(), periodicFlushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum())); + } + public DocsStats docStats() { readAllowed(); return getEngine().docStats(); } + public ProtobufDocsStats protobufDocsStats() { + readAllowed(); + return getEngine().protobufDocsStats(); + } + /** * @return {@link CommitStats} * @throws AlreadyClosedException if shard is closed @@ -1331,14 +1362,36 @@ public IndexingStats indexingStats() { return internalIndexingStats.stats(throttled, 
throttleTimeInMillis); } + public ProtobufIndexingStats protobufIndexingStats() { + Engine engine = getEngineOrNull(); + final boolean throttled; + final long throttleTimeInMillis; + if (engine == null) { + throttled = false; + throttleTimeInMillis = 0; + } else { + throttled = engine.isThrottled(); + throttleTimeInMillis = engine.getIndexThrottleTimeInMillis(); + } + return internalIndexingStats.protobufStats(throttled, throttleTimeInMillis); + } + public SearchStats searchStats(String... groups) { return searchStats.stats(groups); } + public ProtobufSearchStats protobufSearchStats(String... groups) { + return searchStats.protobufStats(groups); + } + public GetStats getStats() { return getService.stats(); } + public ProtobufGetStats getProtobufStats() { + return getService.protobufStats(); + } + public StoreStats storeStats() { try { final RecoveryState recoveryState = this.recoveryState; @@ -1350,6 +1403,17 @@ public StoreStats storeStats() { } } + public ProtobufStoreStats protobufStoreStats() { + try { + final RecoveryState recoveryState = this.recoveryState; + final long bytesStillToRecover = recoveryState == null ? -1L : recoveryState.getIndex().bytesStillToRecover(); + return store.protobufStats(bytesStillToRecover == -1 ? StoreStats.UNKNOWN_RESERVED_BYTES : bytesStillToRecover); + } catch (IOException e) { + failShard("Failing shard because of exception during storeStats", e); + throw new OpenSearchException("io exception while building 'store stats'", e); + } + } + public MergeStats mergeStats() { final Engine engine = getEngineOrNull(); if (engine == null) { @@ -1358,20 +1422,42 @@ public MergeStats mergeStats() { return engine.getMergeStats(); } + public ProtobufMergeStats protobufMergeStats() { + final Engine engine = getEngineOrNull(); + if (engine == null) { + return new ProtobufMergeStats(); + } + return engine.getProtobufMergeStats(); + } + public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) { SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes, includeUnloadedSegments); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); return segmentsStats; } + public ProtobufSegmentsStats protobufSegmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) { + ProtobufSegmentsStats segmentsStats = getEngine().protobufSegmentsStats(includeSegmentFileSizes, includeUnloadedSegments); + segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); + return segmentsStats; + } + public WarmerStats warmerStats() { return shardWarmerService.stats(); } + public ProtobufWarmerStats protobufWarmerStats() { + return shardWarmerService.protobufStats(); + } + public FieldDataStats fieldDataStats(String... fields) { return shardFieldData.stats(fields); } + public ProtobufFieldDataStats protobufFieldDataStats(String... fields) { + return shardFieldData.protobufStats(fields); + } + public TranslogStats translogStats() { return getEngine().translogManager().getTranslogStats(); } @@ -2413,6 +2499,13 @@ public RecoveryStats recoveryStats() { return recoveryStats; } + /** + * returns stats about ongoing recoveries, both source and target + */ + public ProtobufRecoveryStats protobufRecoveryStats() { + return protobufRecoveryStats; + } + /** * Returns the current {@link RecoveryState} if this shard is recovering or has been recovering. * Returns null if the recovery has not yet started or shard was not recovered (created via an API). 
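
A note before the next file: every Protobuf-prefixed stats class in this series follows the same serialization contract as the snippets above — writeTo(CodedOutputStream) emits fields with the writeXxxNoTag methods in a fixed order, and a matching CodedInputStream constructor reads them back in exactly that order, with ProtobufStreamInput/ProtobufStreamOutput supplying helpers for optional values and collections. The sketch below is a minimal, self-contained illustration of that round trip; ExampleStats is a hypothetical class invented for this note, not a type from the patch.

import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

final class ExampleStats { // hypothetical, for illustration only
    final long count;
    final String name;

    ExampleStats(long count, String name) {
        this.count = count;
        this.name = name;
    }

    // Read side: consume fields in exactly the order the writer emitted them.
    ExampleStats(CodedInputStream in) throws IOException {
        this.count = in.readInt64();
        this.name = in.readString();
    }

    // Write side: the "NoTag" methods emit raw values with no field numbers,
    // so the field order is the entire wire contract.
    void writeTo(CodedOutputStream out) throws IOException {
        out.writeInt64NoTag(count);
        out.writeStringNoTag(name);
    }

    static ExampleStats roundTrip(ExampleStats stats) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        CodedOutputStream out = CodedOutputStream.newInstance(bytes);
        stats.writeTo(out);
        out.flush(); // CodedOutputStream buffers internally; flush before reading
        return new ExampleStats(CodedInputStream.newInstance(bytes.toByteArray()));
    }
}

Because nothing is tagged, this encoding is positional rather than schema-evolvable: adding or reordering fields requires changing writer and reader together, which is worth keeping in mind when reviewing the stats classes in the rest of this patch.
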
diff --git a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java index d7e15dd3e40f5..19beb43ee5111 100644 --- a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java @@ -58,6 +58,16 @@ IndexingStats stats(boolean isThrottled, long currentThrottleInMillis) { return new IndexingStats(total); } + /** + * Returns the stats, including type specific stats. If the types are null/0 length, then nothing + * is returned for them. If they are set, then only types provided will be returned, or + * {@code _all} for all types. + */ + ProtobufIndexingStats protobufStats(boolean isThrottled, long currentThrottleInMillis) { + ProtobufIndexingStats.Stats total = totalStats.protobufStats(isThrottled, currentThrottleInMillis); + return new ProtobufIndexingStats(total); + } + @Override public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { if (operation.origin().isRecovery() == false) { @@ -157,5 +167,20 @@ IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) { TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis) ); } + + ProtobufIndexingStats.Stats protobufStats(boolean isThrottled, long currentThrottleMillis) { + return new ProtobufIndexingStats.Stats( + indexMetric.count(), + TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), + indexCurrent.count(), + indexFailed.count(), + deleteMetric.count(), + TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), + deleteCurrent.count(), + noopUpdates.count(), + isThrottled, + TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis) + ); + } } } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 921deae41946a..e31b87348b4e9 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -482,6 +482,14 @@ public StoreStats stats(long reservedBytes) throws IOException { return new StoreStats(directory.estimateSize(), reservedBytes); } + /** + * @param reservedBytes a prediction of how much larger the store is expected to grow, or {@link StoreStats#UNKNOWN_RESERVED_BYTES}. + */ + public ProtobufStoreStats protobufStats(long reservedBytes) throws IOException { + ensureOpen(); + return new ProtobufStoreStats(directory.estimateSize(), reservedBytes); + } + /** * Increments the refCount of this Store instance. RefCounts are used to determine when a * Store can be closed safely, i.e. as soon as there are no more references. 
Be sure to always call a
diff --git a/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java b/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java
index ff8f31297d47d..af842da0618f8 100644
--- a/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java
+++ b/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java
@@ -71,4 +71,8 @@ public void onPostWarm(long tookInNanos) {
     public WarmerStats stats() {
         return new WarmerStats(current.count(), warmerMetric.count(), TimeUnit.NANOSECONDS.toMillis(warmerMetric.sum()));
     }
+
+    public ProtobufWarmerStats protobufStats() {
+        return new ProtobufWarmerStats(current.count(), warmerMetric.count(), TimeUnit.NANOSECONDS.toMillis(warmerMetric.sum()));
+    }
 }
diff --git a/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java b/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java
index 8f7df5157d673..9015f1498ae5e 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java
@@ -49,6 +49,7 @@
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.index.cache.query.ProtobufQueryCacheStats;
 import org.opensearch.index.cache.query.QueryCacheStats;
 import org.opensearch.core.index.shard.ShardId;
 
@@ -142,6 +143,35 @@ public QueryCacheStats getStats(ShardId shard) {
         return shardStats;
     }
 
+    /** Get usage statistics for the given shard. */
+    public ProtobufQueryCacheStats getProtobufStats(ShardId shard) {
+        final Map<ShardId, ProtobufQueryCacheStats> stats = new HashMap<>();
+        for (Map.Entry<ShardId, Stats> entry : shardStats.entrySet()) {
+            stats.put(entry.getKey(), entry.getValue().toProtobufQueryCacheStats());
+        }
+        ProtobufQueryCacheStats shardStats = new ProtobufQueryCacheStats();
+        ProtobufQueryCacheStats info = stats.get(shard);
+        if (info == null) {
+            info = new ProtobufQueryCacheStats();
+        }
+        shardStats.add(info);
+
+        // We also have some shared ram usage that we try to distribute
+        // proportionally to the number of cache entries of each shard
+        if (stats.isEmpty()) {
+            shardStats.add(new ProtobufQueryCacheStats(sharedRamBytesUsed, 0, 0, 0, 0));
+        } else {
+            long totalSize = 0;
+            for (ProtobufQueryCacheStats s : stats.values()) {
+                totalSize += s.getCacheSize();
+            }
+            final double weight = totalSize == 0 ? 
1d / stats.size() : ((double) shardStats.getCacheSize()) / totalSize; + final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed); + shardStats.add(new ProtobufQueryCacheStats(additionalRamBytesUsed, 0, 0, 0, 0)); + } + return shardStats; + } + @Override public Weight doCache(Weight weight, QueryCachingPolicy policy) { while (weight instanceof CachingWeightWrapper) { @@ -242,6 +272,10 @@ QueryCacheStats toQueryCacheStats() { return new QueryCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize); } + ProtobufQueryCacheStats toProtobufQueryCacheStats() { + return new ProtobufQueryCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize); + } + @Override public String toString() { return "{shardId=" diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 7be824d95b421..2b89024b6a46b 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -46,6 +46,9 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.opensearch.action.admin.indices.stats.IndexShardStats; +import org.opensearch.action.admin.indices.stats.ProtobufCommonStats; +import org.opensearch.action.admin.indices.stats.ProtobufIndexShardStats; +import org.opensearch.action.admin.indices.stats.ProtobufShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.search.SearchType; import org.opensearch.client.Client; @@ -112,14 +115,20 @@ import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.flush.FlushStats; +import org.opensearch.index.flush.ProtobufFlushStats; import org.opensearch.index.get.GetStats; +import org.opensearch.index.get.ProtobufGetStats; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.merge.MergeStats; +import org.opensearch.index.merge.ProtobufMergeStats; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryRewriteContext; +import org.opensearch.index.recovery.ProtobufRecoveryStats; import org.opensearch.index.recovery.RecoveryStats; +import org.opensearch.index.refresh.ProtobufRefreshStats; import org.opensearch.index.refresh.RefreshStats; +import org.opensearch.index.search.stats.ProtobufSearchStats; import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; @@ -131,6 +140,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; +import org.opensearch.index.shard.ProtobufIndexingStats; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.translog.InternalTranslogFactory; @@ -296,6 +306,7 @@ public class IndicesService extends AbstractLifecycleComponent private final Map> pendingDeletes = new HashMap<>(); private final AtomicInteger numUncompletedDeletes = new AtomicInteger(); private final OldShardsStats oldShardsStats = new OldShardsStats(); + private final ProtobufOldShardsStats protobufOldShardsStats = new ProtobufOldShardsStats(); private 
final MapperRegistry mapperRegistry; private final NamedWriteableRegistry namedWriteableRegistry; private final IndexingMemoryController indexingMemoryController; @@ -542,6 +553,38 @@ public NodeIndicesStats stats(CommonStatsFlags flags) { return new NodeIndicesStats(commonStats, statsByShard(this, flags)); } + public ProtobufNodeIndicesStats protobufStats(CommonStatsFlags flags) { + ProtobufCommonStats commonStats = new ProtobufCommonStats(flags); + // the cumulative statistics also account for shards that are no longer on this node, which is tracked by oldShardsStats + for (Flag flag : flags.getFlags()) { + switch (flag) { + case Get: + commonStats.get.add(protobufOldShardsStats.getStats); + break; + case Indexing: + commonStats.indexing.add(protobufOldShardsStats.indexingStats); + break; + case Search: + commonStats.search.add(protobufOldShardsStats.searchStats); + break; + case Merge: + commonStats.merge.add(protobufOldShardsStats.mergeStats); + break; + case Refresh: + commonStats.refresh.add(protobufOldShardsStats.refreshStats); + break; + case Recovery: + commonStats.recoveryStats.add(protobufOldShardsStats.recoveryStats); + break; + case Flush: + commonStats.flush.add(protobufOldShardsStats.flushStats); + break; + } + } + + return new ProtobufNodeIndicesStats(commonStats, statsByShardProtobuf(this, flags)); + } + Map> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) { final Map> statsByShard = new HashMap<>(); @@ -569,6 +612,37 @@ Map> statsByShard(final IndicesService indicesServi return statsByShard; } + Map> statsByShardProtobuf(final IndicesService indicesService, final CommonStatsFlags flags) { + final Map> statsByShard = new HashMap<>(); + + for (final IndexService indexService : indicesService) { + for (final IndexShard indexShard : indexService) { + try { + final ProtobufIndexShardStats indexShardStats = indicesService.indexShardStatsProtobuf( + indicesService, + indexShard, + flags + ); + + if (indexShardStats == null) { + continue; + } + + if (statsByShard.containsKey(indexService.index()) == false) { + statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats)); + } else { + statsByShard.get(indexService.index()).add(indexShardStats); + } + } catch (IllegalIndexShardStateException | AlreadyClosedException e) { + // we can safely ignore illegal state on ones that are closing for example + logger.trace(() -> new ParameterizedMessage("{} ignoring shard stats", indexShard.shardId()), e); + } + } + } + + return statsByShard; + } + IndexShardStats indexShardStats(final IndicesService indicesService, final IndexShard indexShard, final CommonStatsFlags flags) { if (indexShard.routingEntry() == null) { return null; @@ -602,6 +676,43 @@ IndexShardStats indexShardStats(final IndicesService indicesService, final Index ); } + ProtobufIndexShardStats indexShardStatsProtobuf( + final IndicesService indicesService, + final IndexShard indexShard, + final CommonStatsFlags flags + ) { + if (indexShard.routingEntry() == null) { + return null; + } + + CommitStats commitStats; + SeqNoStats seqNoStats; + RetentionLeaseStats retentionLeaseStats; + try { + commitStats = indexShard.commitStats(); + seqNoStats = indexShard.seqNoStats(); + retentionLeaseStats = indexShard.getRetentionLeaseStats(); + } catch (AlreadyClosedException e) { + // shard is closed - no stats is fine + commitStats = null; + seqNoStats = null; + retentionLeaseStats = null; + } + + return new ProtobufIndexShardStats( + indexShard.shardId(), + new ProtobufShardStats[] { + new 
ProtobufShardStats( + indexShard.routingEntry(), + indexShard.shardPath(), + new ProtobufCommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), + commitStats, + seqNoStats, + retentionLeaseStats + ) } + ); + } + /** * Checks if changes (adding / removing) indices, shards and so on are allowed. * @@ -1032,6 +1143,36 @@ public synchronized void beforeIndexShardClosed(ShardId shardId, @Nullable Index } } + /** + * Statistics for old shards + * + * @opensearch.internal + */ + static class ProtobufOldShardsStats implements IndexEventListener { + + final ProtobufSearchStats searchStats = new ProtobufSearchStats(); + final ProtobufGetStats getStats = new ProtobufGetStats(); + final ProtobufIndexingStats indexingStats = new ProtobufIndexingStats(); + final ProtobufMergeStats mergeStats = new ProtobufMergeStats(); + final ProtobufRefreshStats refreshStats = new ProtobufRefreshStats(); + final ProtobufFlushStats flushStats = new ProtobufFlushStats(); + final ProtobufRecoveryStats recoveryStats = new ProtobufRecoveryStats(); + + @Override + public synchronized void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { + if (indexShard != null) { + getStats.addTotals(indexShard.getProtobufStats()); + indexingStats.addTotals(indexShard.protobufIndexingStats()); + // if this index was closed or deleted, we should eliminate the effect of the current scroll for this shard + searchStats.addTotalsForClosingShard(indexShard.protobufSearchStats()); + mergeStats.addTotals(indexShard.protobufMergeStats()); + refreshStats.addTotals(indexShard.protobufRefreshStats()); + flushStats.addTotals(indexShard.protobufFlushStats()); + recoveryStats.addTotals(indexShard.protobufRecoveryStats()); + } + } + } + /** * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index * but does not deal with in-memory structures. For those call {@link #removeIndex(Index, IndexRemovalReason, String)} diff --git a/server/src/main/java/org/opensearch/indices/ProtobufNodeIndicesStats.java b/server/src/main/java/org/opensearch/indices/ProtobufNodeIndicesStats.java index 296dfb89bba05..27269cb02626a 100644 --- a/server/src/main/java/org/opensearch/indices/ProtobufNodeIndicesStats.java +++ b/server/src/main/java/org/opensearch/indices/ProtobufNodeIndicesStats.java @@ -10,11 +10,15 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; + +import org.opensearch.action.admin.indices.stats.ProtobufIndexShardStats; import org.opensearch.action.admin.indices.stats.ProtobufCommonStats; +import org.opensearch.action.admin.indices.stats.ProtobufShardStats; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.Index; import org.opensearch.index.cache.query.ProtobufQueryCacheStats; import org.opensearch.index.cache.request.ProtobufRequestCacheStats; import org.opensearch.index.engine.ProtobufSegmentsStats; @@ -33,6 +37,8 @@ import org.opensearch.search.suggest.completion.ProtobufCompletionStats; import java.io.IOException; +import java.util.List; +import java.util.Map; /** * Global information on indices stats running on a specific node. 
@@ -42,14 +48,25 @@ public class ProtobufNodeIndicesStats implements ProtobufWriteable, ToXContentFragment { private ProtobufCommonStats stats; + private Map> statsByShard; public ProtobufNodeIndicesStats(CodedInputStream in) throws IOException { stats = new ProtobufCommonStats(in); } - public ProtobufNodeIndicesStats(ProtobufCommonStats oldStats) { + public ProtobufNodeIndicesStats(ProtobufCommonStats oldStats, Map> statsByShard) { + // this.stats = stats; + this.statsByShard = statsByShard; + // make a total common stats from old ones and current ones this.stats = oldStats; + for (List shardStatsList : statsByShard.values()) { + for (ProtobufIndexShardStats indexShardStats : shardStatsList) { + for (ProtobufShardStats shardStats : indexShardStats.getShards()) { + stats.add(shardStats.getStats()); + } + } + } } @Nullable diff --git a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java index 34cadd2c828d0..b05832b61a405 100644 --- a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -344,6 +344,39 @@ public CircuitBreakerStats stats(String name) { ); } + @Override + public ProtobufAllCircuitBreakerStats protobufStats() { + List allStats = new ArrayList<>(this.breakers.size()); + // Gather the "estimated" count for the parent breaker by adding the + // estimations for each individual breaker + for (CircuitBreaker breaker : this.breakers.values()) { + allStats.add(protobufStats(breaker.getName())); + } + // Manually add the parent breaker settings since they aren't part of the breaker map + allStats.add( + new ProtobufCircuitBreakerStats( + CircuitBreaker.PARENT, + parentSettings.getLimit(), + memoryUsed(0L).totalUsage, + 1.0, + parentTripCount.get() + ) + ); + return new ProtobufAllCircuitBreakerStats(allStats.toArray(new ProtobufCircuitBreakerStats[0])); + } + + @Override + public ProtobufCircuitBreakerStats protobufStats(String name) { + CircuitBreaker breaker = this.breakers.get(name); + return new ProtobufCircuitBreakerStats( + breaker.getName(), + breaker.getLimit(), + breaker.getUsed(), + breaker.getOverhead(), + breaker.getTrippedCount() + ); + } + /** * Tracks memory usage * diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index 52ced9c051d14..3d6446c72e90d 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -71,6 +71,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.node.ProtobufReportingService; import org.opensearch.node.ReportingService; import org.opensearch.plugins.IngestPlugin; import org.opensearch.script.ScriptService; @@ -98,7 +99,7 @@ * * @opensearch.internal */ -public class IngestService implements ClusterStateApplier, ReportingService { +public class IngestService implements ClusterStateApplier, ReportingService, ProtobufReportingService { public static final String NOOP_PIPELINE_NAME = "_none"; @@ -433,6 +434,16 @@ public IngestInfo info() { return new IngestInfo(processorInfoList); } + @Override + public ProtobufIngestInfo protobufInfo() { + Map processorFactories = getProcessorFactories(); + List processorInfoList = 
new ArrayList<>(processorFactories.size()); + for (Map.Entry entry : processorFactories.entrySet()) { + processorInfoList.add(new ProtobufProcessorInfo(entry.getKey())); + } + return new ProtobufIngestInfo(processorInfoList); + } + Map pipelines() { return pipelines; } @@ -692,6 +703,24 @@ public IngestStats stats() { return statsBuilder.build(); } + public ProtobufIngestStats protobufStats() { + ProtobufIngestStats.Builder statsBuilder = new ProtobufIngestStats.Builder(); + statsBuilder.addTotalMetrics(totalMetrics); + pipelines.forEach((id, holder) -> { + Pipeline pipeline = holder.pipeline; + CompoundProcessor rootProcessor = pipeline.getCompoundProcessor(); + statsBuilder.addPipelineMetrics(id, pipeline.getMetrics()); + List> processorMetrics = new ArrayList<>(); + getProcessorMetrics(rootProcessor, processorMetrics); + processorMetrics.forEach(t -> { + Processor processor = t.v1(); + IngestMetric processorMetric = t.v2(); + statsBuilder.addProcessorMetrics(id, getProcessorName(processor), processor.getType(), processorMetric); + }); + }); + return statsBuilder.build(); + } + /** * Adds a listener that gets invoked with the current cluster state before processor factories * get invoked. diff --git a/server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java b/server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java index d7a72cb078219..52d26218e61d5 100644 --- a/server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java +++ b/server/src/main/java/org/opensearch/ingest/ProtobufIngestInfo.java @@ -10,6 +10,8 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; + +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ProtobufReportingService; import java.io.IOException; @@ -56,4 +58,16 @@ public Iterable getProcessors() { public boolean containsProcessor(String type) { return processors.contains(new ProtobufProcessorInfo(type)); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("ingest"); + builder.startArray("processors"); + for (ProtobufProcessorInfo info : processors) { + info.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java b/server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java index debfcc669921c..199f21aab12af 100644 --- a/server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java +++ b/server/src/main/java/org/opensearch/ingest/ProtobufProcessorInfo.java @@ -11,6 +11,9 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContent.Params; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -19,7 +22,7 @@ * * @opensearch.internal */ -public class ProtobufProcessorInfo implements ProtobufWriteable { +public class ProtobufProcessorInfo implements ProtobufWriteable, ToXContentObject { private final String type; @@ -45,4 +48,12 @@ public void writeTo(CodedOutputStream out) throws IOException { public String getType() { return type; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("type", type); + builder.endObject(); + return 
builder; + } } diff --git a/server/src/main/java/org/opensearch/monitor/ProtobufMonitorService.java b/server/src/main/java/org/opensearch/monitor/ProtobufMonitorService.java new file mode 100644 index 0000000000000..067ef7c193d0d --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/ProtobufMonitorService.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.monitor; + +import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.monitor.fs.ProtobufFsService; +import org.opensearch.monitor.jvm.JvmGcMonitorService; +import org.opensearch.monitor.jvm.ProtobufJvmService; +import org.opensearch.monitor.os.ProtobufOsService; +import org.opensearch.monitor.process.ProtobufProcessService; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; + +/** + * The resource monitoring service + * + * @opensearch.internal + */ +public class ProtobufMonitorService extends AbstractLifecycleComponent { + + private final JvmGcMonitorService jvmGcMonitorService; + private final ProtobufOsService osService; + private final ProtobufProcessService processService; + private final ProtobufJvmService jvmService; + private final ProtobufFsService fsService; + + public ProtobufMonitorService(Settings settings, NodeEnvironment nodeEnvironment, ThreadPool threadPool, FileCache fileCache) + throws IOException { + this.jvmGcMonitorService = new JvmGcMonitorService(settings, threadPool); + this.osService = new ProtobufOsService(settings); + this.processService = new ProtobufProcessService(settings); + this.jvmService = new ProtobufJvmService(settings); + this.fsService = new ProtobufFsService(settings, nodeEnvironment, fileCache); + } + + public ProtobufOsService osService() { + return this.osService; + } + + public ProtobufProcessService processService() { + return this.processService; + } + + public ProtobufJvmService jvmService() { + return this.jvmService; + } + + public ProtobufFsService fsService() { + return this.fsService; + } + + @Override + protected void doStart() { + jvmGcMonitorService.start(); + } + + @Override + protected void doStop() { + jvmGcMonitorService.stop(); + } + + @Override + protected void doClose() { + jvmGcMonitorService.close(); + } + +} diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index e20d84cd9763e..c2a31f8a4be8c 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -97,6 +97,33 @@ public FsInfo stats(FsInfo previous) throws IOException { return new FsInfo(System.currentTimeMillis(), ioStats, paths); } + public ProtobufFsInfo protobufStats(ProtobufFsInfo previous) throws IOException { + if (!nodeEnv.hasNodeFile()) { + return new ProtobufFsInfo(System.currentTimeMillis(), null, new ProtobufFsInfo.Path[0]); + } + NodePath[] dataLocations = nodeEnv.nodePaths(); + ProtobufFsInfo.Path[] paths = new ProtobufFsInfo.Path[dataLocations.length]; + for (int i = 0; i < dataLocations.length; i++) { + paths[i] = getProtobufFsInfo(dataLocations[i]); + if (fileCache != null && dataLocations[i].fileCacheReservedSize != 
ByteSizeValue.ZERO) { + paths[i].fileCacheReserved = adjustForHugeFilesystems(dataLocations[i].fileCacheReservedSize.getBytes()); + paths[i].fileCacheUtilized = adjustForHugeFilesystems(fileCache.usage().usage()); + paths[i].available -= (paths[i].fileCacheReserved - paths[i].fileCacheUtilized); + } + } + ProtobufFsInfo.IoStats ioStats = null; + if (Constants.LINUX) { + Set<Tuple<Integer, Integer>> devicesNumbers = new HashSet<>(); + for (NodePath dataLocation : dataLocations) { + if (dataLocation.majorDeviceNumber != -1 && dataLocation.minorDeviceNumber != -1) { + devicesNumbers.add(Tuple.tuple(dataLocation.majorDeviceNumber, dataLocation.minorDeviceNumber)); + } + } + ioStats = protobufIoStats(devicesNumbers, previous); + } + return new ProtobufFsInfo(System.currentTimeMillis(), ioStats, paths); + } + final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final FsInfo previous) { try { final Map<Tuple<Integer, Integer>, FsInfo.DeviceStats> deviceMap = new HashMap<>(); @@ -149,6 +176,58 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, } } + final ProtobufFsInfo.IoStats protobufIoStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final ProtobufFsInfo previous) { + try { + final Map<Tuple<Integer, Integer>, ProtobufFsInfo.DeviceStats> deviceMap = new HashMap<>(); + if (previous != null && previous.getIoStats() != null && previous.getIoStats().devicesStats != null) { + for (int i = 0; i < previous.getIoStats().devicesStats.length; i++) { + ProtobufFsInfo.DeviceStats deviceStats = previous.getIoStats().devicesStats[i]; + deviceMap.put(Tuple.tuple(deviceStats.majorDeviceNumber, deviceStats.minorDeviceNumber), deviceStats); + } + } + + List<ProtobufFsInfo.DeviceStats> devicesStats = new ArrayList<>(); + + List<String> lines = readProcDiskStats(); + if (!lines.isEmpty()) { + for (String line : lines) { + String[] fields = line.trim().split("\\s+"); + final int majorDeviceNumber = Integer.parseInt(fields[0]); + final int minorDeviceNumber = Integer.parseInt(fields[1]); + if (!devicesNumbers.contains(Tuple.tuple(majorDeviceNumber, minorDeviceNumber))) { + continue; + } + final String deviceName = fields[2]; + final long readsCompleted = Long.parseLong(fields[3]); + final long sectorsRead = Long.parseLong(fields[5]); + final long writesCompleted = Long.parseLong(fields[7]); + final long sectorsWritten = Long.parseLong(fields[9]); + final ProtobufFsInfo.DeviceStats deviceStats = new ProtobufFsInfo.DeviceStats( + majorDeviceNumber, + minorDeviceNumber, + deviceName, + readsCompleted, + sectorsRead, + writesCompleted, + sectorsWritten, + deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)) + ); + devicesStats.add(deviceStats); + } + } + + return new ProtobufFsInfo.IoStats(devicesStats.toArray(new ProtobufFsInfo.DeviceStats[0])); + } catch (Exception e) { + // do not fail OpenSearch if something unexpected + // happens here + logger.debug( + () -> new ParameterizedMessage("unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), + e + ); + return null; + } + } + @SuppressForbidden(reason = "read /proc/diskstats") List<String> readProcDiskStats() throws IOException { return Files.readAllLines(PathUtils.get("/proc/diskstats")); @@ -182,4 +261,20 @@ public static FsInfo.Path getFSInfo(NodePath nodePath) throws IOException { return fsPath; } + public static ProtobufFsInfo.Path getProtobufFsInfo(NodePath nodePath) throws IOException { + ProtobufFsInfo.Path fsPath = new ProtobufFsInfo.Path(); + fsPath.path = nodePath.path.toString(); + + // NOTE: we use already cached (on node startup) FileStore and spins + // since recomputing these once per second (default) could be costly, + // and they 
should not change: + fsPath.total = adjustForHugeFilesystems(nodePath.fileStore.getTotalSpace()); + fsPath.free = adjustForHugeFilesystems(nodePath.fileStore.getUnallocatedSpace()); + fsPath.available = adjustForHugeFilesystems(nodePath.fileStore.getUsableSpace()); + fsPath.fileCacheReserved = adjustForHugeFilesystems(nodePath.fileCacheReservedSize.getBytes()); + fsPath.type = nodePath.fileStore.type(); + fsPath.mount = nodePath.fileStore.toString(); + return fsPath; + } + } diff --git a/server/src/main/java/org/opensearch/monitor/fs/ProtobufFsService.java b/server/src/main/java/org/opensearch/monitor/fs/ProtobufFsService.java new file mode 100644 index 0000000000000..e959d2a46293b --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/fs/ProtobufFsService.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.monitor.fs; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.SingleObjectCache; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.store.remote.filecache.FileCache; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * FileSystem service + * + * @opensearch.internal + */ +public class ProtobufFsService { + + private static final Logger logger = LogManager.getLogger(ProtobufFsService.class); + + private final Supplier fsInfoSupplier; + + public static final Setting REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "monitor.fs.refresh_interval", + TimeValue.timeValueSeconds(1), + TimeValue.timeValueSeconds(1), + Property.NodeScope + ); + + // permits tests to bypass the refresh interval on the cache; deliberately unregistered since it is only for use in tests + public static final Setting ALWAYS_REFRESH_SETTING = Setting.boolSetting( + "monitor.fs.always_refresh", + false, + Property.NodeScope + ); + + public ProtobufFsService(final Settings settings, final NodeEnvironment nodeEnvironment, FileCache fileCache) { + final FsProbe probe = new FsProbe(nodeEnvironment, fileCache); + final ProtobufFsInfo initialValue = stats(probe, null); + if (ALWAYS_REFRESH_SETTING.get(settings)) { + assert REFRESH_INTERVAL_SETTING.exists(settings) == false; + logger.debug("bypassing refresh_interval"); + fsInfoSupplier = () -> stats(probe, initialValue); + } else { + final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); + logger.debug("using refresh_interval [{}]", refreshInterval); + fsInfoSupplier = new FsInfoCache(refreshInterval, initialValue, probe)::getOrRefresh; + } + } + + public ProtobufFsInfo stats() { + return fsInfoSupplier.get(); + } + + private static ProtobufFsInfo stats(FsProbe probe, ProtobufFsInfo initialValue) { + try { + return probe.protobufStats(initialValue); + } catch (IOException e) { + logger.debug("unexpected exception reading filesystem info", e); + return null; + } + } + + private static class FsInfoCache extends SingleObjectCache { + + private final ProtobufFsInfo initialValue; + private final FsProbe probe; + + FsInfoCache(TimeValue interval, ProtobufFsInfo initialValue, FsProbe probe) { + super(interval, initialValue); + 
this.initialValue = initialValue; + this.probe = probe; + } + + @Override + protected ProtobufFsInfo refresh() { + return stats(probe, initialValue); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java index 9eafd80203a87..5ca0ff8ed1809 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java @@ -36,13 +36,20 @@ import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; +import org.opensearch.common.io.stream.ProtobufStreamInput; +import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.node.ProtobufReportingService; import org.opensearch.node.ReportingService; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; @@ -61,7 +68,7 @@ * * @opensearch.internal */ -public class JvmInfo implements ReportingService.Info { +public class JvmInfo implements ReportingService.Info, ProtobufReportingService.ProtobufInfo { private static JvmInfo INSTANCE; @@ -356,6 +363,65 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(useCompressedOops); } + public JvmInfo(CodedInputStream in) throws IOException { + ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); + pid = in.readInt64(); + version = in.readString(); + vmName = in.readString(); + vmVersion = in.readString(); + vmVendor = in.readString(); + bundledJdk = in.readBool(); + usingBundledJdk = protobufStreamInput.readOptionalBoolean(); + startTime = in.readInt64(); + inputArguments = new String[in.readInt32()]; + for (int i = 0; i < inputArguments.length; i++) { + inputArguments[i] = in.readString(); + } + bootClassPath = in.readString(); + classPath = in.readString(); + systemProperties = protobufStreamInput.readMap(CodedInputStream::readString, CodedInputStream::readString); + mem = new Mem(in); + gcCollectors = protobufStreamInput.readStringArray(); + memoryPools = protobufStreamInput.readStringArray(); + useCompressedOops = in.readString(); + // the following members are only used locally for bootstrap checks, never serialized nor printed out + this.configuredMaxHeapSize = -1; + this.configuredInitialHeapSize = -1; + this.onError = null; + this.onOutOfMemoryError = null; + this.useG1GC = "unknown"; + this.useSerialGC = "unknown"; + this.g1RegionSize = -1; + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); + out.writeInt64NoTag(pid); + out.writeStringNoTag(version); + out.writeStringNoTag(vmName); + out.writeStringNoTag(vmVersion); + out.writeStringNoTag(vmVendor); + out.writeBoolNoTag(bundledJdk); + protobufStreamOutput.writeOptionalBoolean(usingBundledJdk); + out.writeInt64NoTag(startTime); + out.writeInt32NoTag(inputArguments.length); + for (String inputArgument : inputArguments) { + out.writeStringNoTag(inputArgument); + } 
+ out.writeStringNoTag(bootClassPath); + out.writeStringNoTag(classPath); + out.writeInt32NoTag(this.systemProperties.size()); + for (Map.Entry entry : systemProperties.entrySet()) { + out.writeStringNoTag(entry.getKey()); + out.writeStringNoTag(entry.getValue()); + } + mem.writeTo(out); + protobufStreamOutput.writeStringArray(gcCollectors); + protobufStreamOutput.writeStringArray(memoryPools); + out.writeStringNoTag(useCompressedOops); + } + /** * The process id. */ @@ -597,7 +663,7 @@ static final class Fields { * * @opensearch.internal */ - public static class Mem implements Writeable { + public static class Mem implements Writeable, ProtobufWriteable { private final long heapInit; private final long heapMax; @@ -630,6 +696,23 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(directMemoryMax); } + public Mem(CodedInputStream in) throws IOException { + this.heapInit = in.readInt64(); + this.heapMax = in.readInt64(); + this.nonHeapInit = in.readInt64(); + this.nonHeapMax = in.readInt64(); + this.directMemoryMax = in.readInt64(); + } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + out.writeInt64NoTag(heapInit); + out.writeInt64NoTag(heapMax); + out.writeInt64NoTag(nonHeapInit); + out.writeInt64NoTag(nonHeapMax); + out.writeInt64NoTag(directMemoryMax); + } + public ByteSizeValue getHeapInit() { return new ByteSizeValue(heapInit); } diff --git a/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java deleted file mode 100644 index 6216248191550..0000000000000 --- a/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmInfo.java +++ /dev/null @@ -1,571 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. -*/ - -package org.opensearch.monitor.jvm; - -import com.google.protobuf.CodedInputStream; -import com.google.protobuf.CodedOutputStream; -import org.apache.lucene.util.Constants; -import org.opensearch.common.Booleans; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.io.PathUtils; -import org.opensearch.common.io.stream.ProtobufStreamInput; -import org.opensearch.common.io.stream.ProtobufStreamOutput; -import org.opensearch.common.io.stream.ProtobufWriteable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.node.ProtobufReportingService; - -import java.io.IOException; -import java.lang.management.GarbageCollectorMXBean; -import java.lang.management.ManagementFactory; -import java.lang.management.ManagementPermission; -import java.lang.management.MemoryMXBean; -import java.lang.management.MemoryPoolMXBean; -import java.lang.management.PlatformManagedObject; -import java.lang.management.RuntimeMXBean; -import java.lang.reflect.Method; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** - * Holds information about the JVM -* -* @opensearch.internal -*/ -public class ProtobufJvmInfo implements ProtobufReportingService.ProtobufInfo { - - private static ProtobufJvmInfo INSTANCE; - - static { - RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); - MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); - - long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit(); - long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 
0 : memoryMXBean.getHeapMemoryUsage().getMax(); - long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit(); - long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getMax(); - long directMemoryMax = 0; - try { - Class vmClass = Class.forName("sun.misc.VM"); - directMemoryMax = (Long) vmClass.getMethod("maxDirectMemory").invoke(null); - } catch (Exception t) { - // ignore - } - String[] inputArguments = runtimeMXBean.getInputArguments().toArray(new String[0]); - Mem mem = new Mem(heapInit, heapMax, nonHeapInit, nonHeapMax, directMemoryMax); - - String bootClassPath; - try { - bootClassPath = runtimeMXBean.getBootClassPath(); - } catch (UnsupportedOperationException e) { - // oracle java 9 - bootClassPath = System.getProperty("sun.boot.class.path"); - if (bootClassPath == null) { - // something else - bootClassPath = ""; - } - } - String classPath = runtimeMXBean.getClassPath(); - Map systemProperties = Collections.unmodifiableMap(runtimeMXBean.getSystemProperties()); - - List gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans(); - String[] gcCollectors = new String[gcMxBeans.size()]; - for (int i = 0; i < gcMxBeans.size(); i++) { - GarbageCollectorMXBean gcMxBean = gcMxBeans.get(i); - gcCollectors[i] = gcMxBean.getName(); - } - - List memoryPoolMXBeans = ManagementFactory.getMemoryPoolMXBeans(); - String[] memoryPools = new String[memoryPoolMXBeans.size()]; - for (int i = 0; i < memoryPoolMXBeans.size(); i++) { - MemoryPoolMXBean memoryPoolMXBean = memoryPoolMXBeans.get(i); - memoryPools[i] = memoryPoolMXBean.getName(); - } - - String onError = null; - String onOutOfMemoryError = null; - String useCompressedOops = "unknown"; - String useG1GC = "unknown"; - long g1RegisionSize = -1; - String useSerialGC = "unknown"; - long configuredInitialHeapSize = -1; - long configuredMaxHeapSize = -1; - try { - @SuppressWarnings("unchecked") - Class clazz = (Class) Class.forName( - "com.sun.management.HotSpotDiagnosticMXBean" - ); - Class vmOptionClazz = Class.forName("com.sun.management.VMOption"); - PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz); - Method vmOptionMethod = clazz.getMethod("getVMOption", String.class); - Method valueMethod = vmOptionClazz.getMethod("getValue"); - - try { - Object onErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnError"); - onError = (String) valueMethod.invoke(onErrorObject); - } catch (Exception ignored) {} - - try { - Object onOutOfMemoryErrorObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "OnOutOfMemoryError"); - onOutOfMemoryError = (String) valueMethod.invoke(onOutOfMemoryErrorObject); - } catch (Exception ignored) {} - - try { - Object useCompressedOopsVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); - useCompressedOops = (String) valueMethod.invoke(useCompressedOopsVmOptionObject); - } catch (Exception ignored) {} - - try { - Object useG1GCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseG1GC"); - useG1GC = (String) valueMethod.invoke(useG1GCVmOptionObject); - Object regionSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "G1HeapRegionSize"); - g1RegisionSize = Long.parseLong((String) valueMethod.invoke(regionSizeVmOptionObject)); - } catch (Exception ignored) {} - - try { - Object initialHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "InitialHeapSize"); - 
configuredInitialHeapSize = Long.parseLong((String) valueMethod.invoke(initialHeapSizeVmOptionObject)); - } catch (Exception ignored) {} - - try { - Object maxHeapSizeVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "MaxHeapSize"); - configuredMaxHeapSize = Long.parseLong((String) valueMethod.invoke(maxHeapSizeVmOptionObject)); - } catch (Exception ignored) {} - - try { - Object useSerialGCVmOptionObject = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseSerialGC"); - useSerialGC = (String) valueMethod.invoke(useSerialGCVmOptionObject); - } catch (Exception ignored) {} - - } catch (Exception ignored) { - - } - - final boolean bundledJdk = Booleans.parseBoolean(System.getProperty("opensearch.bundled_jdk", Boolean.FALSE.toString())); - final Boolean usingBundledJdk = bundledJdk ? usingBundledJdk() : null; - - INSTANCE = new ProtobufJvmInfo( - JvmPid.getPid(), - System.getProperty("java.version"), - runtimeMXBean.getVmName(), - runtimeMXBean.getVmVersion(), - runtimeMXBean.getVmVendor(), - bundledJdk, - usingBundledJdk, - runtimeMXBean.getStartTime(), - configuredInitialHeapSize, - configuredMaxHeapSize, - mem, - inputArguments, - bootClassPath, - classPath, - systemProperties, - gcCollectors, - memoryPools, - onError, - onOutOfMemoryError, - useCompressedOops, - useG1GC, - useSerialGC, - g1RegisionSize - ); - } - - @SuppressForbidden(reason = "PathUtils#get") - private static boolean usingBundledJdk() { - /* - * We are using the bundled JDK if java.home is the jdk sub-directory of our working directory. This is because we always set - * the working directory of Elasticsearch to home, and the bundled JDK is in the jdk sub-directory there. - */ - final String javaHome = System.getProperty("java.home"); - final String userDir = System.getProperty("user.dir"); - if (Constants.MAC_OS_X) { - return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk.app/Contents/Home").toAbsolutePath()); - } else { - return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk").toAbsolutePath()); - } - } - - public static ProtobufJvmInfo jvmInfo() { - SecurityManager sm = System.getSecurityManager(); - if (sm != null) { - sm.checkPermission(new ManagementPermission("monitor")); - sm.checkPropertyAccess("*"); - } - return INSTANCE; - } - - private final long pid; - private final String version; - private final String vmName; - private final String vmVersion; - private final String vmVendor; - private final boolean bundledJdk; - private final Boolean usingBundledJdk; - private final long startTime; - private final long configuredInitialHeapSize; - private final long configuredMaxHeapSize; - private final Mem mem; - private final String[] inputArguments; - private final String bootClassPath; - private final String classPath; - private final Map systemProperties; - private final String[] gcCollectors; - private final String[] memoryPools; - private final String onError; - private final String onOutOfMemoryError; - private final String useCompressedOops; - private final String useG1GC; - private final String useSerialGC; - private final long g1RegionSize; - - private ProtobufJvmInfo( - long pid, - String version, - String vmName, - String vmVersion, - String vmVendor, - boolean bundledJdk, - Boolean usingBundledJdk, - long startTime, - long configuredInitialHeapSize, - long configuredMaxHeapSize, - Mem mem, - String[] inputArguments, - String bootClassPath, - String classPath, - Map systemProperties, - String[] gcCollectors, - String[] memoryPools, - String onError, - 
String onOutOfMemoryError, - String useCompressedOops, - String useG1GC, - String useSerialGC, - long g1RegionSize - ) { - this.pid = pid; - this.version = version; - this.vmName = vmName; - this.vmVersion = vmVersion; - this.vmVendor = vmVendor; - this.bundledJdk = bundledJdk; - this.usingBundledJdk = usingBundledJdk; - this.startTime = startTime; - this.configuredInitialHeapSize = configuredInitialHeapSize; - this.configuredMaxHeapSize = configuredMaxHeapSize; - this.mem = mem; - this.inputArguments = inputArguments; - this.bootClassPath = bootClassPath; - this.classPath = classPath; - this.systemProperties = systemProperties; - this.gcCollectors = gcCollectors; - this.memoryPools = memoryPools; - this.onError = onError; - this.onOutOfMemoryError = onOutOfMemoryError; - this.useCompressedOops = useCompressedOops; - this.useG1GC = useG1GC; - this.useSerialGC = useSerialGC; - this.g1RegionSize = g1RegionSize; - } - - public ProtobufJvmInfo(CodedInputStream in) throws IOException { - ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); - pid = in.readInt64(); - version = in.readString(); - vmName = in.readString(); - vmVersion = in.readString(); - vmVendor = in.readString(); - bundledJdk = in.readBool(); - usingBundledJdk = protobufStreamInput.readOptionalBoolean(); - startTime = in.readInt64(); - inputArguments = new String[in.readInt32()]; - for (int i = 0; i < inputArguments.length; i++) { - inputArguments[i] = in.readString(); - } - bootClassPath = in.readString(); - classPath = in.readString(); - systemProperties = protobufStreamInput.readMap(CodedInputStream::readString, CodedInputStream::readString); - mem = new Mem(in); - gcCollectors = protobufStreamInput.readStringArray(); - memoryPools = protobufStreamInput.readStringArray(); - useCompressedOops = in.readString(); - // the following members are only used locally for bootstrap checks, never serialized nor printed out - this.configuredMaxHeapSize = -1; - this.configuredInitialHeapSize = -1; - this.onError = null; - this.onOutOfMemoryError = null; - this.useG1GC = "unknown"; - this.useSerialGC = "unknown"; - this.g1RegionSize = -1; - } - - @Override - public void writeTo(CodedOutputStream out) throws IOException { - ProtobufStreamOutput protobufStreamOutput = new ProtobufStreamOutput(out); - out.writeInt64NoTag(pid); - out.writeStringNoTag(version); - out.writeStringNoTag(vmName); - out.writeStringNoTag(vmVersion); - out.writeStringNoTag(vmVendor); - out.writeBoolNoTag(bundledJdk); - protobufStreamOutput.writeOptionalBoolean(usingBundledJdk); - out.writeInt64NoTag(startTime); - out.writeInt32NoTag(inputArguments.length); - for (String inputArgument : inputArguments) { - out.writeStringNoTag(inputArgument); - } - out.writeStringNoTag(bootClassPath); - out.writeStringNoTag(classPath); - out.writeInt32NoTag(this.systemProperties.size()); - for (Map.Entry entry : systemProperties.entrySet()) { - out.writeStringNoTag(entry.getKey()); - out.writeStringNoTag(entry.getValue()); - } - mem.writeTo(out); - protobufStreamOutput.writeStringArray(gcCollectors); - protobufStreamOutput.writeStringArray(memoryPools); - out.writeStringNoTag(useCompressedOops); - } - - /** - * The process id. - */ - public long pid() { - return this.pid; - } - - /** - * The process id. 
- */ - public long getPid() { - return pid; - } - - public String version() { - return this.version; - } - - public String getVersion() { - return this.version; - } - - public int versionAsInteger() { - try { - int i = 0; - StringBuilder sVersion = new StringBuilder(); - for (; i < version.length(); i++) { - if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') { - break; - } - if (version.charAt(i) != '.') { - sVersion.append(version.charAt(i)); - } - } - if (i == 0) { - return -1; - } - return Integer.parseInt(sVersion.toString()); - } catch (Exception e) { - return -1; - } - } - - public int versionUpdatePack() { - try { - int i = 0; - StringBuilder sVersion = new StringBuilder(); - for (; i < version.length(); i++) { - if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') { - break; - } - if (version.charAt(i) != '.') { - sVersion.append(version.charAt(i)); - } - } - if (i == 0) { - return -1; - } - Integer.parseInt(sVersion.toString()); - int from; - if (version.charAt(i) == '_') { - // 1.7.0_4 - from = ++i; - } else if (version.charAt(i) == '-' && version.charAt(i + 1) == 'u') { - // 1.7.0-u2-b21 - i = i + 2; - from = i; - } else { - return -1; - } - for (; i < version.length(); i++) { - if (!Character.isDigit(version.charAt(i)) && version.charAt(i) != '.') { - break; - } - } - if (from == i) { - return -1; - } - return Integer.parseInt(version.substring(from, i)); - } catch (Exception e) { - return -1; - } - } - - public String getVmName() { - return this.vmName; - } - - public String getVmVersion() { - return this.vmVersion; - } - - public String getVmVendor() { - return this.vmVendor; - } - - public boolean getBundledJdk() { - return bundledJdk; - } - - public Boolean getUsingBundledJdk() { - return usingBundledJdk; - } - - public long getStartTime() { - return this.startTime; - } - - public Mem getMem() { - return this.mem; - } - - public String[] getInputArguments() { - return this.inputArguments; - } - - public String getBootClassPath() { - return this.bootClassPath; - } - - public String getClassPath() { - return this.classPath; - } - - public Map getSystemProperties() { - return this.systemProperties; - } - - public long getConfiguredInitialHeapSize() { - return configuredInitialHeapSize; - } - - public long getConfiguredMaxHeapSize() { - return configuredMaxHeapSize; - } - - public String onError() { - return onError; - } - - public String onOutOfMemoryError() { - return onOutOfMemoryError; - } - - /** - * The value of the JVM flag UseCompressedOops, if available otherwise - * "unknown". The value "unknown" indicates that an attempt was - * made to obtain the value of the flag on this JVM and the attempt - * failed. - * - * @return the value of the JVM flag UseCompressedOops or "unknown" - */ - public String useCompressedOops() { - return this.useCompressedOops; - } - - public String useG1GC() { - return this.useG1GC; - } - - public String useSerialGC() { - return this.useSerialGC; - } - - public long getG1RegionSize() { - return g1RegionSize; - } - - public String[] getGcCollectors() { - return gcCollectors; - } - - public String[] getMemoryPools() { - return memoryPools; - } - - /** - * Memory information. 
- * - * @opensearch.internal - */ - public static class Mem implements ProtobufWriteable { - - private final long heapInit; - private final long heapMax; - private final long nonHeapInit; - private final long nonHeapMax; - private final long directMemoryMax; - - public Mem(long heapInit, long heapMax, long nonHeapInit, long nonHeapMax, long directMemoryMax) { - this.heapInit = heapInit; - this.heapMax = heapMax; - this.nonHeapInit = nonHeapInit; - this.nonHeapMax = nonHeapMax; - this.directMemoryMax = directMemoryMax; - } - - public Mem(CodedInputStream in) throws IOException { - this.heapInit = in.readInt64(); - this.heapMax = in.readInt64(); - this.nonHeapInit = in.readInt64(); - this.nonHeapMax = in.readInt64(); - this.directMemoryMax = in.readInt64(); - } - - @Override - public void writeTo(CodedOutputStream out) throws IOException { - out.writeInt64NoTag(heapInit); - out.writeInt64NoTag(heapMax); - out.writeInt64NoTag(nonHeapInit); - out.writeInt64NoTag(nonHeapMax); - out.writeInt64NoTag(directMemoryMax); - } - - public ByteSizeValue getHeapInit() { - return new ByteSizeValue(heapInit); - } - - public ByteSizeValue getHeapMax() { - return new ByteSizeValue(heapMax); - } - - public ByteSizeValue getNonHeapInit() { - return new ByteSizeValue(nonHeapInit); - } - - public ByteSizeValue getNonHeapMax() { - return new ByteSizeValue(nonHeapMax); - } - - public ByteSizeValue getDirectMemoryMax() { - return new ByteSizeValue(directMemoryMax); - } - } -} diff --git a/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmService.java b/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmService.java new file mode 100644 index 0000000000000..63d3428086621 --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/jvm/ProtobufJvmService.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.monitor.jvm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.ProtobufReportingService; + +/** + * Service for monitoring the JVM + * + * @opensearch.internal + */ +public class ProtobufJvmService implements ProtobufReportingService<JvmInfo> { + + private static final Logger logger = LogManager.getLogger(ProtobufJvmService.class); + + private final JvmInfo jvmInfo; + + private final TimeValue refreshInterval; + + private ProtobufJvmStats jvmStats; + + public static final Setting<TimeValue> REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "monitor.jvm.refresh_interval", + TimeValue.timeValueSeconds(1), + TimeValue.timeValueSeconds(1), + Property.NodeScope + ); + + public ProtobufJvmService(Settings settings) { + this.jvmInfo = JvmInfo.jvmInfo(); + this.jvmStats = ProtobufJvmStats.jvmStats(); + + this.refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); + + logger.debug("using refresh_interval [{}]", refreshInterval); + } + + @Override + public JvmInfo protobufInfo() { + return this.jvmInfo; + } + + public synchronized ProtobufJvmStats stats() { + if ((System.currentTimeMillis() - jvmStats.getTimestamp()) > refreshInterval.millis()) { + jvmStats = ProtobufJvmStats.jvmStats(); + } + return jvmStats; + } +} diff --git a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java index 98229941252ba..b2393a346db2e 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java @@ -433,6 +433,40 @@ private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup return new OsStats.Cgroup.CpuStat(numberOfPeriods, numberOfTimesThrottled, timeThrottledNanos); } + private ProtobufOsStats.Cgroup.CpuStat getProtobufCgroupCpuAcctCpuStat(final String controlGroup) throws IOException { + final List<String> lines = readSysFsCgroupCpuAcctCpuStat(controlGroup); + long numberOfPeriods = -1; + long numberOfTimesThrottled = -1; + long timeThrottledNanos = -1; + + for (final String line : lines) { + final String[] fields = line.split("\\s+"); + switch (fields[0]) { + case "nr_periods": + numberOfPeriods = Long.parseLong(fields[1]); + break; + case "nr_throttled": + numberOfTimesThrottled = Long.parseLong(fields[1]); + break; + case "throttled_time": + timeThrottledNanos = Long.parseLong(fields[1]); + break; + } + } + if (isCpuStatWarningsLogged.getAndSet(true) == false) { + if (numberOfPeriods == -1) { + logger.warn("Expected to see nr_periods field but found nothing"); + } + if (numberOfTimesThrottled == -1) { + logger.warn("Expected to see nr_throttled field but found nothing"); + } + if (timeThrottledNanos == -1) { + logger.warn("Expected to see throttled_time field but found nothing"); + } + } + return new ProtobufOsStats.Cgroup.CpuStat(numberOfPeriods, numberOfTimesThrottled, timeThrottledNanos); + } + /** * Returns the lines from {@code cpu.stat} for the control group to which the OpenSearch process belongs for the {@code cpu} * subsystem. These lines represent the CPU time statistics and have the form @@ -592,6 +626,61 @@ private OsStats.Cgroup getCgroup() { } } + /** + * Basic cgroup stats. 
+ * + * @return basic cgroup stats, or {@code null} if an I/O exception occurred reading the cgroup stats + */ + private ProtobufOsStats.Cgroup getProtobufCgroup() { + try { + if (!areCgroupStatsAvailable()) { + return null; + } else { + final Map controllerMap = getControlGroups(); + assert !controllerMap.isEmpty(); + + final String cpuAcctControlGroup = controllerMap.get("cpuacct"); + if (cpuAcctControlGroup == null) { + logger.debug("no [cpuacct] data found in cgroup stats"); + return null; + } + final long cgroupCpuAcctUsageNanos = getCgroupCpuAcctUsageNanos(cpuAcctControlGroup); + + final String cpuControlGroup = controllerMap.get("cpu"); + if (cpuControlGroup == null) { + logger.debug("no [cpu] data found in cgroup stats"); + return null; + } + final long cgroupCpuAcctCpuCfsPeriodMicros = getCgroupCpuAcctCpuCfsPeriodMicros(cpuControlGroup); + final long cgroupCpuAcctCpuCfsQuotaMicros = getCgroupCpuAcctCpuCfsQuotaMicros(cpuControlGroup); + final ProtobufOsStats.Cgroup.CpuStat cpuStat = getProtobufCgroupCpuAcctCpuStat(cpuControlGroup); + + final String memoryControlGroup = controllerMap.get("memory"); + if (memoryControlGroup == null) { + logger.debug("no [memory] data found in cgroup stats"); + return null; + } + final String cgroupMemoryLimitInBytes = getCgroupMemoryLimitInBytes(memoryControlGroup); + final String cgroupMemoryUsageInBytes = getCgroupMemoryUsageInBytes(memoryControlGroup); + + return new ProtobufOsStats.Cgroup( + cpuAcctControlGroup, + cgroupCpuAcctUsageNanos, + cpuControlGroup, + cgroupCpuAcctCpuCfsPeriodMicros, + cgroupCpuAcctCpuCfsQuotaMicros, + cpuStat, + memoryControlGroup, + cgroupMemoryLimitInBytes, + cgroupMemoryUsageInBytes + ); + } + } catch (final IOException e) { + logger.debug("error reading control group stats", e); + return null; + } + } + private static class OsProbeHolder { private static final OsProbe INSTANCE = new OsProbe(); } @@ -625,6 +714,18 @@ OsInfo osInfo(long refreshInterval, int allocatedProcessors) throws IOException ); } + ProtobufOsInfo protobufOsInfo(long refreshInterval, int allocatedProcessors) throws IOException { + return new ProtobufOsInfo( + refreshInterval, + Runtime.getRuntime().availableProcessors(), + allocatedProcessors, + Constants.OS_NAME, + getPrettyName(), + Constants.OS_ARCH, + Constants.OS_VERSION + ); + } + private String getPrettyName() throws IOException { // TODO: return a prettier name on non-Linux OS if (Constants.LINUX) { @@ -696,6 +797,14 @@ public OsStats osStats() { return new OsStats(System.currentTimeMillis(), cpu, mem, swap, cgroup); } + public ProtobufOsStats protobufOsStats() { + final ProtobufOsStats.Cpu cpu = new ProtobufOsStats.Cpu(getSystemCpuPercent(), getSystemLoadAverage()); + final ProtobufOsStats.Mem mem = new ProtobufOsStats.Mem(getTotalPhysicalMemorySize(), getFreePhysicalMemorySize()); + final ProtobufOsStats.Swap swap = new ProtobufOsStats.Swap(getTotalSwapSpaceSize(), getFreeSwapSpaceSize()); + final ProtobufOsStats.Cgroup cgroup = Constants.LINUX ? getProtobufCgroup() : null; + return new ProtobufOsStats(System.currentTimeMillis(), cpu, mem, swap, cgroup); + } + /** * Returns a given method of the OperatingSystemMXBean, or null if the method is not found or unavailable. 
*/ diff --git a/server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java b/server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java index dc898d991626d..ff029fe223cc7 100644 --- a/server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/os/ProtobufOsInfo.java @@ -13,6 +13,9 @@ import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.ToXContent.Params; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ProtobufReportingService; import java.io.IOException; @@ -100,4 +103,38 @@ public String getArch() { public String getVersion() { return version; } + + static final class Fields { + static final String OS = "os"; + static final String NAME = "name"; + static final String PRETTY_NAME = "pretty_name"; + static final String ARCH = "arch"; + static final String VERSION = "version"; + static final String REFRESH_INTERVAL = "refresh_interval"; + static final String REFRESH_INTERVAL_IN_MILLIS = "refresh_interval_in_millis"; + static final String AVAILABLE_PROCESSORS = "available_processors"; + static final String ALLOCATED_PROCESSORS = "allocated_processors"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.OS); + builder.humanReadableField(Fields.REFRESH_INTERVAL_IN_MILLIS, Fields.REFRESH_INTERVAL, new TimeValue(refreshInterval)); + if (name != null) { + builder.field(Fields.NAME, name); + } + if (prettyName != null) { + builder.field(Fields.PRETTY_NAME, prettyName); + } + if (arch != null) { + builder.field(Fields.ARCH, arch); + } + if (version != null) { + builder.field(Fields.VERSION, version); + } + builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors); + builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/opensearch/monitor/os/ProtobufOsService.java b/server/src/main/java/org/opensearch/monitor/os/ProtobufOsService.java new file mode 100644 index 0000000000000..37ecaf2b5269f --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/os/ProtobufOsService.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.monitor.os; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.SingleObjectCache; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.node.ProtobufReportingService; + +import java.io.IOException; + +/** + * Service for the Operating System + * + * @opensearch.internal + */ +public class ProtobufOsService implements ProtobufReportingService { + + private static final Logger logger = LogManager.getLogger(ProtobufOsService.class); + + private final OsProbe probe; + private final ProtobufOsInfo info; + private final SingleObjectCache osStatsCache; + + public static final Setting REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "monitor.os.refresh_interval", + TimeValue.timeValueSeconds(1), + TimeValue.timeValueSeconds(1), + Property.NodeScope + ); + + public ProtobufOsService(Settings settings) throws IOException { + this.probe = OsProbe.getInstance(); + TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); + this.info = probe.protobufOsInfo(refreshInterval.millis(), OpenSearchExecutors.allocatedProcessors(settings)); + this.osStatsCache = new OsStatsCache(refreshInterval, probe.protobufOsStats()); + logger.debug("using refresh_interval [{}]", refreshInterval); + } + + @Override + public ProtobufOsInfo protobufInfo() { + return this.info; + } + + public synchronized ProtobufOsStats stats() { + return osStatsCache.getOrRefresh(); + } + + private class OsStatsCache extends SingleObjectCache { + OsStatsCache(TimeValue interval, ProtobufOsStats initValue) { + super(interval, initValue); + } + + @Override + protected ProtobufOsStats refresh() { + return probe.protobufOsStats(); + } + } +} diff --git a/server/src/main/java/org/opensearch/monitor/process/ProcessProbe.java b/server/src/main/java/org/opensearch/monitor/process/ProcessProbe.java index c2f83003d2c04..aa3b535cf0d30 100644 --- a/server/src/main/java/org/opensearch/monitor/process/ProcessProbe.java +++ b/server/src/main/java/org/opensearch/monitor/process/ProcessProbe.java @@ -147,12 +147,22 @@ public ProcessInfo processInfo(long refreshInterval) { return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked(), refreshInterval); } + public ProtobufProcessInfo protobufProcessInfo(long refreshInterval) { + return new ProtobufProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked(), refreshInterval); + } + public ProcessStats processStats() { ProcessStats.Cpu cpu = new ProcessStats.Cpu(getProcessCpuPercent(), getProcessCpuTotalTime()); ProcessStats.Mem mem = new ProcessStats.Mem(getTotalVirtualMemorySize()); return new ProcessStats(System.currentTimeMillis(), getOpenFileDescriptorCount(), getMaxFileDescriptorCount(), cpu, mem); } + public ProtobufProcessStats processProtobufStats() { + ProtobufProcessStats.Cpu cpu = new ProtobufProcessStats.Cpu(getProcessCpuPercent(), getProcessCpuTotalTime()); + ProtobufProcessStats.Mem mem = new ProtobufProcessStats.Mem(getTotalVirtualMemorySize()); + return new ProtobufProcessStats(System.currentTimeMillis(), getOpenFileDescriptorCount(), getMaxFileDescriptorCount(), cpu, mem); + } + /** * Returns a given method of the OperatingSystemMXBean, * or null if the method is not found or unavailable. 
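Every Protobuf* class added in this patch follows the same wire convention: writeTo(CodedOutputStream) emits fields with the NoTag variants in a fixed order, and a matching CodedInputStream constructor consumes them positionally, so write and read order must agree exactly. The following is a minimal sketch of that round trip using a hypothetical ExampleStat type that is not part of this change; it assumes only the patch's ProtobufWriteable interface and the protobuf-java CodedInputStream/CodedOutputStream API.

import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import org.opensearch.common.io.stream.ProtobufWriteable;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Hypothetical type illustrating the pattern; not part of this patch.
class ExampleStat implements ProtobufWriteable {

    private final long count;
    private final String name;

    ExampleStat(long count, String name) {
        this.count = count;
        this.name = name;
    }

    // Read-side constructor: fields are consumed in the exact order writeTo emits them.
    ExampleStat(CodedInputStream in) throws IOException {
        this.count = in.readInt64();
        this.name = in.readString();
    }

    @Override
    public void writeTo(CodedOutputStream out) throws IOException {
        out.writeInt64NoTag(count);
        out.writeStringNoTag(name);
    }

    // Round trip: serialize, flush (CodedOutputStream buffers internally), then rebuild.
    static ExampleStat roundTrip(ExampleStat value) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        CodedOutputStream out = CodedOutputStream.newInstance(bytes);
        value.writeTo(out);
        out.flush();
        return new ExampleStat(CodedInputStream.newInstance(bytes.toByteArray()));
    }
}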
diff --git a/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java b/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java index ea7b1960c0372..7c9895411fbd1 100644 --- a/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java +++ b/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessInfo.java @@ -10,6 +10,9 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ProtobufReportingService; import java.io.IOException; @@ -62,4 +65,22 @@ public long getId() { public boolean isMlockall() { return mlockall; } + + static final class Fields { + static final String PROCESS = "process"; + static final String REFRESH_INTERVAL = "refresh_interval"; + static final String REFRESH_INTERVAL_IN_MILLIS = "refresh_interval_in_millis"; + static final String ID = "id"; + static final String MLOCKALL = "mlockall"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.PROCESS); + builder.humanReadableField(Fields.REFRESH_INTERVAL_IN_MILLIS, Fields.REFRESH_INTERVAL, new TimeValue(refreshInterval)); + builder.field(Fields.ID, id); + builder.field(Fields.MLOCKALL, mlockall); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessService.java b/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessService.java new file mode 100644 index 0000000000000..6de862abc7e9e --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/process/ProtobufProcessService.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.monitor.process; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.SingleObjectCache; +import org.opensearch.node.ProtobufReportingService; + +/** + * The service for the process + * + * @opensearch.internal + */ +public final class ProtobufProcessService implements ProtobufReportingService { + + private static final Logger logger = LogManager.getLogger(ProtobufProcessService.class); + + private final ProcessProbe probe; + private final ProtobufProcessInfo info; + private final SingleObjectCache processStatsCache; + + public static final Setting REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "monitor.process.refresh_interval", + TimeValue.timeValueSeconds(1), + TimeValue.timeValueSeconds(1), + Property.NodeScope + ); + + public ProtobufProcessService(Settings settings) { + this.probe = ProcessProbe.getInstance(); + final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); + processStatsCache = new ProcessStatsCache(refreshInterval, probe.processProtobufStats()); + this.info = probe.protobufProcessInfo(refreshInterval.millis()); + logger.debug("using refresh_interval [{}]", refreshInterval); + } + + @Override + public ProtobufProcessInfo protobufInfo() { + return this.info; + } + + public ProtobufProcessStats stats() { + return processStatsCache.getOrRefresh(); + } + + private class ProcessStatsCache extends SingleObjectCache { + ProcessStatsCache(TimeValue interval, ProtobufProcessStats initValue) { + super(interval, initValue); + } + + @Override + protected ProtobufProcessStats refresh() { + return probe.processProtobufStats(); + } + } +} diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 60c0d4c75879b..0ec80f0bfda21 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -114,7 +114,7 @@ import org.opensearch.cluster.metadata.TemplateUpgradeService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.BatchedRerouteService; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.DiskThresholdMonitor; @@ -184,6 +184,7 @@ import org.opensearch.indices.store.IndicesStore; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; +import org.opensearch.monitor.ProtobufMonitorService; import org.opensearch.monitor.fs.FsHealthService; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.persistent.PersistentTasksClusterService; @@ -558,7 +559,8 @@ protected Node( } client = new NodeClient(settings, threadPool); - // final ProtobufThreadPool protobufThreadPool = new ProtobufThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ProtobufExecutorBuilder[0])); + // final ProtobufThreadPool protobufThreadPool = new ProtobufThreadPool(settings, runnableTaskListener, + // executorBuilders.toArray(new ProtobufExecutorBuilder[0])); // resourcesToClose.add(() -> ProtobufThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); // final 
ResourceWatcherService resourceWatcherServiceProtobuf = new ResourceWatcherService(settings, protobufThreadPool); // resourcesToClose.add(resourceWatcherServiceProtobuf); @@ -567,7 +569,7 @@ protected Node( // resourcesToClose.add(() -> HeaderWarning.removeThreadContext(protobufThreadPool.getThreadContext())); // for (final ProtobufExecutorBuilder builder : protobufThreadPool.builders()) { - // additionalSettings.addAll(builder.getRegisteredSettings()); + // additionalSettings.addAll(builder.getRegisteredSettings()); // } protobufClient = new ProtobufNodeClient(settings, threadPool); @@ -662,6 +664,12 @@ protected Node( initializeFileCache(settings, circuitBreakerService.getBreaker(CircuitBreaker.REQUEST)); final FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnvironment, fileCache); final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, fileCache); + final ProtobufMonitorService protobufMonitorService = new ProtobufMonitorService( + settings, + nodeEnvironment, + threadPool, + fileCache + ); pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { CircuitBreaker breaker = circuitBreakerService.getBreaker(plugin.getCircuitBreaker(settings).getName()); @@ -831,17 +839,17 @@ protected Node( .collect(Collectors.toList()); // ActionModule actionModule = new ActionModule( - // settings, - // clusterModule.getIndexNameExpressionResolver(), - // settingsModule.getIndexScopedSettings(), - // settingsModule.getClusterSettings(), - // settingsModule.getSettingsFilter(), - // threadPool, - // pluginsService.filterPlugins(ActionPlugin.class), - // client, - // circuitBreakerService, - // usageService, - // systemIndices + // settings, + // clusterModule.getIndexNameExpressionResolver(), + // settingsModule.getIndexScopedSettings(), + // settingsModule.getClusterSettings(), + // settingsModule.getSettingsFilter(), + // threadPool, + // pluginsService.filterPlugins(ActionPlugin.class), + // client, + // circuitBreakerService, + // usageService, + // systemIndices // ); // modules.add(actionModule); @@ -922,14 +930,14 @@ protected Node( TopNSearchTasksLogger taskConsumer = new TopNSearchTasksLogger(settings, settingsModule.getClusterSettings()); transportService.getTaskManager().registerTaskResourceConsumer(taskConsumer); // if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { - // this.extensionsManager.initializeServicesAndRestHandler( - // actionModule, - // settingsModule, - // transportService, - // clusterService, - // environment.settings(), - // client - // ); + // this.extensionsManager.initializeServicesAndRestHandler( + // actionModule, + // settingsModule, + // transportService, + // clusterService, + // environment.settings(), + // client + // ); // } final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); @@ -1078,30 +1086,7 @@ protected Node( this.protobufNodeService = new ProtobufNodeService( settings, threadPool, - monitorService, - discoveryModule.getDiscovery(), - protobufTransportService, - indicesService, - pluginsService, - circuitBreakerService, - scriptService, - httpServerTransport, - ingestService, - clusterService, - settingsModule.getSettingsFilter(), - responseCollectorService, - searchTransportService, - indexingPressureService, - searchModule.getValuesSourceRegistry().getUsageService(), - searchBackpressureService, - searchPipelineService, - fileCache, - 
taskCancellationMonitoringService - ); - this.protobufNodeService = new ProtobufNodeService( - settings, - threadPool, - monitorService, + protobufMonitorService, discoveryModule.getDiscovery(), protobufTransportService, indicesService, @@ -1288,8 +1273,10 @@ protected Node( this.namedWriteableRegistry = namedWriteableRegistry; ProtobufDynamicActionRegistry protobufDynamicActionRegistry = protobufActionModule.getProtobufDynamicActionRegistry(); - protobufDynamicActionRegistry.registerUnmodifiableActionMap(injector.getInstance(new Key>() { - })); + protobufDynamicActionRegistry.registerUnmodifiableActionMap( + injector.getInstance(new Key>() { + }) + ); protobufClient.initialize( protobufDynamicActionRegistry, () -> clusterService.localNode().getId(), @@ -1329,7 +1316,7 @@ protected ProtobufTransportService newProtobufTransportService( Transport transport, ThreadPool threadPool, ProtobufTransportInterceptor interceptor, - Function<ProtobufBoundTransportAddress, ProtobufDiscoveryNode> localNodeFactory, + Function<ProtobufBoundTransportAddress, DiscoveryNode> localNodeFactory, ClusterSettings clusterSettings, Set<String> taskHeaders ) { @@ -1419,6 +1406,11 @@ public Node start() throws NodeValidationException { assert localNodeFactory.getNode() != null; assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; + protobufTransportService.start(); + protobufTransportService.setLocalNode(localNodeFactory.getNode()); + assert localNodeFactory.getNode() != null; + assert protobufTransportService.getLocalNode().equals(localNodeFactory.getNode()) + : "protobufTransportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); injector.getInstance(SegmentReplicationSourceService.class).start(); @@ -1465,7 +1457,7 @@ public Node start() throws NodeValidationException { assert clusterService.localNode().equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided"; transportService.acceptIncomingRequests(); - // protobufTransportService.acceptIncomingRequests(); + protobufTransportService.acceptIncomingRequests(); discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings()); configureNodeAndClusterIdStateListener(clusterService); @@ -1476,7 +1468,13 @@ public Node start() throws NodeValidationException { ClusterState clusterState = clusterService.state(); ProtobufClusterState protobufClusterState = clusterService.protobufState(); ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); - ProtobufClusterStateObserver protobufObserver = new ProtobufClusterStateObserver(protobufClusterState, clusterService, null, logger, thread.getThreadContext()); + ProtobufClusterStateObserver protobufObserver = new ProtobufClusterStateObserver( + protobufClusterState, + clusterService, + null, + logger, + thread.getThreadContext() + ); if (clusterState.nodes().getClusterManagerNodeId() == null) { logger.debug("waiting to join the cluster. 
timeout [{}]", initialStateTimeout); @@ -1896,8 +1894,8 @@ DiscoveryNode getNode() { } } - private static class ProtobufLocalNodeFactory implements Function<ProtobufBoundTransportAddress, ProtobufDiscoveryNode> { - private final SetOnce<ProtobufDiscoveryNode> localNode = new SetOnce<>(); + private static class ProtobufLocalNodeFactory implements Function<ProtobufBoundTransportAddress, DiscoveryNode> { + private final SetOnce<DiscoveryNode> localNode = new SetOnce<>(); private final String persistentNodeId; private final Settings settings; @@ -1907,12 +1905,18 @@ private ProtobufLocalNodeFactory(Settings settings, String persistentNodeId) { } @Override - public ProtobufDiscoveryNode apply(ProtobufBoundTransportAddress boundTransportAddress) { - localNode.set(ProtobufDiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + public DiscoveryNode apply(ProtobufBoundTransportAddress boundTransportAddress) { + localNode.set( + DiscoveryNode.createLocal( + settings, + new TransportAddress(boundTransportAddress.publishAddress().address()), + persistentNodeId + ) + ); return localNode.get(); } - ProtobufDiscoveryNode getNode() { + DiscoveryNode getNode() { assert localNode.get() != null; return localNode.get(); } diff --git a/server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java b/server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java index cf5ab03c8bb91..a07b7e39cdd0f 100644 --- a/server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java +++ b/server/src/main/java/org/opensearch/node/ProtobufNodeClosedException.java @@ -10,7 +10,7 @@ import com.google.protobuf.CodedInputStream; import org.opensearch.ProtobufOpenSearchException; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import java.io.IOException; @@ -21,7 +21,7 @@ */ public class ProtobufNodeClosedException extends ProtobufOpenSearchException { - public ProtobufNodeClosedException(ProtobufDiscoveryNode node) { + public ProtobufNodeClosedException(DiscoveryNode node) { super("node closed " + node); } diff --git a/server/src/main/java/org/opensearch/node/ProtobufNodeService.java b/server/src/main/java/org/opensearch/node/ProtobufNodeService.java index b774178bb8122..0f0b600881d78 100644 --- a/server/src/main/java/org/opensearch/node/ProtobufNodeService.java +++ b/server/src/main/java/org/opensearch/node/ProtobufNodeService.java @@ -8,13 +8,12 @@ package org.opensearch.node; -import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.common.util.io.IOUtils; import org.opensearch.Build; import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.ProtobufNodeInfo; import org.opensearch.action.admin.cluster.node.stats.ProtobufNodeStats; -import org.opensearch.action.admin.indices.stats.ProtobufCommonStatsFlags; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.search.SearchTransportService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; @@ -27,7 +26,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.ingest.IngestService; -import org.opensearch.monitor.MonitorService; +import org.opensearch.monitor.ProtobufMonitorService; import org.opensearch.plugins.PluginsService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; @@ -48,7 +47,7 @@ public class ProtobufNodeService implements Closeable { private final Settings settings; private final 
ThreadPool threadPool; - private final MonitorService monitorService; + private final ProtobufMonitorService monitorService; private final ProtobufTransportService transportService; private final IndicesService indicesService; private final PluginsService pluginService; @@ -70,7 +69,7 @@ public class ProtobufNodeService implements Closeable { ProtobufNodeService( Settings settings, ThreadPool threadPool, - MonitorService monitorService, + ProtobufMonitorService monitorService, Discovery discovery, ProtobufTransportService transportService, IndicesService indicesService, @@ -131,44 +130,44 @@ public ProtobufNodeInfo info( if (settings) { builder.setSettings(settingsFilter.filter(this.settings)); } - // if (os) { - // builder.setOs(monitorService.osService().info()); - // } - // if (process) { - // builder.setProcess(monitorService.processService().info()); - // } - // if (jvm) { - // builder.setJvm(monitorService.jvmService().info()); - // } + if (os) { + builder.setOs(monitorService.osService().protobufInfo()); + } + if (process) { + builder.setProcess(monitorService.processService().protobufInfo()); + } + if (jvm) { + builder.setJvm(monitorService.jvmService().protobufInfo()); + } if (threadPool) { builder.setThreadPool(this.threadPool.protobufInfo()); } if (transport) { builder.setTransport(transportService.protobufInfo()); } - // if (http && httpServerTransport != null) { - // builder.setHttp(httpServerTransport.info()); - // } + if (http && httpServerTransport != null) { + builder.setHttp(httpServerTransport.protobufInfo()); + } // if (plugin && pluginService != null) { - // builder.setPlugins(pluginService.info()); - // } - // if (ingest && ingestService != null) { - // builder.setIngest(ingestService.info()); - // } - // if (aggs && aggregationUsageService != null) { - // builder.setAggsInfo(aggregationUsageService.info()); + // builder.setPlugins(pluginService.info()); // } + if (ingest && ingestService != null) { + builder.setIngest(ingestService.protobufInfo()); + } + if (aggs && aggregationUsageService != null) { + builder.setAggsInfo(aggregationUsageService.protobufInfo()); + } if (indices) { builder.setTotalIndexingBuffer(indicesService.getTotalIndexingBufferBytes()); } - // if (searchPipeline && searchPipelineService != null) { - // builder.setSearchPipelineInfo(searchPipelineService.info()); - // } + if (searchPipeline && searchPipelineService != null) { + builder.setProtobufSearchPipelineInfo(searchPipelineService.protobufInfo()); + } return builder.build(); } public ProtobufNodeStats stats( - ProtobufCommonStatsFlags indices, + CommonStatsFlags indices, boolean os, boolean process, boolean jvm, @@ -194,19 +193,26 @@ public ProtobufNodeStats stats( return new ProtobufNodeStats( transportService.getLocalNode(), System.currentTimeMillis(), - null, - null, - null, - null, + indices.anySet() ? indicesService.protobufStats(indices) : null, + os ? monitorService.osService().stats() : null, + process ? monitorService.processService().stats() : null, + jvm ? monitorService.jvmService().stats() : null, threadPool ? this.threadPool.protobufStats() : null, - null, + fs ? monitorService.fsService().stats() : null, transport ? transportService.stats() : null, - null, - null, - null, - null, - null, - null + http ? (httpServerTransport == null ? null : httpServerTransport.protobufStats()) : null, + circuitBreaker ? circuitBreakerService.protobufStats() : null, + script ? scriptService.protobufStats() : null, + discoveryStats ? discovery.protobufStats() : null, + ingest ? 
ingestService.protobufStats() : null, + adaptiveSelection ? responseCollectorService.getProtobufAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null + // scriptCache ? scriptService.cacheStats() : null, + // indexingPressure ? this.indexingPressureService.nodeStats() : null, + // shardIndexingPressure ? this.indexingPressureService.shardStats(indices) : null, + // searchBackpressure ? this.searchBackpressureService.nodeStats() : null, + // clusterManagerThrottling ? this.clusterService.getClusterManagerService().getThrottlingStats() : null, + // weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, + // fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null ); } @@ -214,7 +220,7 @@ public IngestService getIngestService() { return ingestService; } - public MonitorService getMonitorService() { + public ProtobufMonitorService getMonitorService() { return monitorService; } diff --git a/server/src/main/java/org/opensearch/node/ProtobufReportingService.java b/server/src/main/java/org/opensearch/node/ProtobufReportingService.java index d6e54f245ea95..fbb90a7617148 100644 --- a/server/src/main/java/org/opensearch/node/ProtobufReportingService.java +++ b/server/src/main/java/org/opensearch/node/ProtobufReportingService.java @@ -9,6 +9,7 @@ package org.opensearch.node; import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContent; /** * Node reporting service * @@ -23,7 +24,7 @@ public interface ProtobufReportingService getAllNodeStatistics() { return nodeStats; } + public Map<String, ProtobufComputedNodeStats> getAllNodeStatisticsProtobuf() { + final int clientNum = nodeIdToStats.size(); + // Transform the mutable object internally used for accounting into the computed version + Map<String, ProtobufComputedNodeStats> nodeStats = new HashMap<>(nodeIdToStats.size()); + nodeIdToStats.forEach((k, v) -> { nodeStats.put(k, new ProtobufComputedNodeStats(clientNum, v)); }); + return nodeStats; + } + public AdaptiveSelectionStats getAdaptiveStats(Map<String, Long> clientSearchConnections) { return new AdaptiveSelectionStats(clientSearchConnections, getAllNodeStatistics()); } + public ProtobufAdaptiveSelectionStats getProtobufAdaptiveStats(Map<String, Long> clientSearchConnections) { + return new ProtobufAdaptiveSelectionStats(clientSearchConnections, getAllNodeStatisticsProtobuf()); + } + /** * Optionally return a {@code NodeStatistics} for the given nodeid, if * response information exists for the given node. 
Returns an empty diff --git a/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java b/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java index 29620fb5ee5dd..c82e40e71166e 100644 --- a/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/ProtobufPluginInfo.java @@ -16,6 +16,9 @@ import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; import org.opensearch.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContent.Params; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.io.InputStream; @@ -35,7 +38,7 @@ * * @opensearch.api */ -public class ProtobufPluginInfo implements ProtobufWriteable { +public class ProtobufPluginInfo implements ProtobufWriteable, ToXContentObject { public static final String OPENSEARCH_PLUGIN_PROPERTIES = "plugin-descriptor.properties"; public static final String OPENSEARCH_PLUGIN_POLICY = "plugin-security.policy"; @@ -347,4 +350,23 @@ public boolean hasNativeController() { public String getTargetFolderName() { return (this.customFolderName == null || this.customFolderName.isEmpty()) ? this.name : this.customFolderName; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("name", name); + builder.field("version", version); + builder.field("opensearch_version", opensearchVersion); + builder.field("java_version", javaVersion); + builder.field("description", description); + builder.field("classname", classname); + builder.field("custom_foldername", customFolderName); + builder.field("extended_plugins", extendedPlugins); + builder.field("has_native_controller", hasNativeController); + } + builder.endObject(); + + return builder; + } } diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index 00b9b2fa1fec5..bcfff383f5199 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -103,8 +103,10 @@ public final long getUsageCount() { @Override public final void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + logger.info("BaseRestHandler.handleRequest {}", request); // prepare the request for execution; has the side effect of touching the request parameters final RestChannelConsumer action = prepareRequest(request, client); + logger.info("BaseRestHandler.handleRequest action: {}", action); // validate unconsumed params, but we must exclude params used to format the response // use a sorted set so the unconsumed parameters appear in a reliable sorted order diff --git a/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java b/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java index 42dd853e87aba..6bc78abee594b 100644 --- a/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java @@ -77,8 +77,10 @@ public final long getUsageCount() { @Override public final void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception { + logger.info("ProtobufBaseRestHandler.handleRequest {}", request); // prepare the request for 
execution; has the side effect of touching the request parameters final RestChannelConsumer action = prepareRequest(request, client); + logger.info("ProtobufBaseRestHandler.handleRequest action: {}", action.getClass().getName()); // validate unconsumed params, but we must exclude params used to format the response // use a sorted set so the unconsumed parameters appear in a reliable sorted order diff --git a/server/src/main/java/org/opensearch/rest/ProtobufDeprecationRestHandler.java b/server/src/main/java/org/opensearch/rest/ProtobufDeprecationRestHandler.java new file mode 100644 index 0000000000000..0b0c147d7f29d --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/ProtobufDeprecationRestHandler.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest; + +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; + +import java.util.Objects; + +/** + * {@code ProtobufDeprecationRestHandler} provides a proxy for any existing {@link ProtobufRestHandler} so that usage of the handler can be + * logged using the {@link DeprecationLogger}. + * + * @opensearch.api + */ +public class ProtobufDeprecationRestHandler implements ProtobufRestHandler { + + private final ProtobufRestHandler handler; + private final String deprecationMessage; + private final DeprecationLogger deprecationLogger; + + /** + * Create a {@link ProtobufDeprecationRestHandler} that encapsulates the {@code handler} using the {@code deprecationLogger} to log + * deprecation {@code warning}. + * + * @param handler The rest handler to deprecate (it's possible that the handler is reused with a different name!) + * @param deprecationMessage The message to warn users with when they use the {@code handler} + * @param deprecationLogger The deprecation logger + * @throws NullPointerException if any parameter except {@code deprecationMessage} is {@code null} + * @throws IllegalArgumentException if {@code deprecationMessage} is not a valid header + */ + public ProtobufDeprecationRestHandler(ProtobufRestHandler handler, String deprecationMessage, DeprecationLogger deprecationLogger) { + this.handler = Objects.requireNonNull(handler); + this.deprecationMessage = requireValidHeader(deprecationMessage); + this.deprecationLogger = Objects.requireNonNull(deprecationLogger); + } + + /** + * {@inheritDoc} + *

+ * Usage is logged via the {@link DeprecationLogger} so that the actual response can be notified of deprecation as well. + */ + @Override + public void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception { + deprecationLogger.deprecate("deprecated_route", deprecationMessage); + + handler.handleRequest(request, channel, client); + } + + @Override + public boolean supportsContentStream() { + return handler.supportsContentStream(); + } + + /** + * This does a very basic pass at validating that a header's value contains only expected characters according to RFC-5987, and those + * that it references. + *

+ * https://tools.ietf.org/html/rfc5987 + *

+ * This is only expected to be used for assertions. The idea is that only readable US-ASCII characters are expected; the rest must be + * encoded with percent encoding, which makes checking for a valid character range very simple. + * + * @param value The header value to check + * @return {@code true} if the {@code value} is not obviously wrong. + */ + public static boolean validHeaderValue(String value) { + if (Strings.hasText(value) == false) { + return false; + } + + for (int i = 0; i < value.length(); i++) { + char c = value.charAt(i); + + // 32 = ' ' (31 = unit separator); 126 = '~' (127 = DEL) + if (c < 32 || c > 126) { + return false; + } + } + + return true; + } + + /** + * Throw an exception if the {@code value} is not a {@link #validHeaderValue(String) valid header}. + * + * @param value The header value to check + * @return Always {@code value}. + * @throws IllegalArgumentException if {@code value} is not a {@link #validHeaderValue(String) valid header}. + */ + public static String requireValidHeader(String value) { + if (validHeaderValue(value) == false) { + throw new IllegalArgumentException("header value must contain only US ASCII text"); + } + + return value; + } +} diff --git a/server/src/main/java/org/opensearch/rest/RestBaseHandler.java b/server/src/main/java/org/opensearch/rest/RestBaseHandler.java index ba51a918e2427..cb9a39f394a5f 100644 --- a/server/src/main/java/org/opensearch/rest/RestBaseHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestBaseHandler.java @@ -8,13 +8,11 @@ package org.opensearch.rest; -import org.opensearch.client.node.ProtobufNodeClient; import org.opensearch.core.xcontent.XContent; import org.opensearch.rest.RestRequest.Method; import java.util.Collections; import java.util.List; -import java.util.Objects; import java.util.stream.Collectors; /** @@ -59,10 +57,10 @@ default boolean allowsUnsafeBuffers() { } // /** - // * The list of {@link Route}s that this RestHandler is responsible for handling. - // */ + // * The list of {@link Route}s that this RestHandler is responsible for handling. + // */ // default List routes() { - // return Collections.emptyList(); + // return Collections.emptyList(); // } /** diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index 9710ab5497c36..50cf7dcde5019 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -280,7 +280,7 @@ private void registerProtobufHandlerNoWrap(RestRequest.Method method, String pat * Registers a REST handler with the controller. The REST handler declares the {@code method} * and {@code path} combinations. */ - public void registerHandler(final RestHandler restHandler) { + public void registerHandler(final RestHandler restHandler) { restHandler.routes().forEach(route -> registerHandler(route.getMethod(), route.getPath(), restHandler)); restHandler.deprecatedRoutes() .forEach(route -> registerAsDeprecatedHandler(route.getMethod(), route.getPath(), restHandler, route.getDeprecationMessage())); @@ -300,27 +300,24 @@ public void registerHandler(final RestHandler restHandler) { * Registers a REST handler with the controller. The REST handler declares the {@code method} * and {@code path} combinations. 
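* <p>
* Illustratively, a concrete handler participates by exposing its routes; the
* registration loop below fans each declared (method, path) pair out to
* {@code registerProtobufHandler(method, path, handler)}. The route shown here is
* a sketch only, not part of this change:
* <pre>
* public List<Route> routes() {
*     return Collections.singletonList(new Route(GET, "/_protobuf/cat/nodes"));
* }
* </pre>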
*/ - public void registerProtobufHandler(final ProtobufRestHandler restHandler) { - System.out.println("Registering route"); - restHandler.routes().forEach(route -> System.out.println(route.getMethod() + " " + route.getPath())); + public void registerProtobufHandler(final ProtobufRestHandler restHandler) { restHandler.routes().forEach(route -> registerProtobufHandler(route.getMethod(), route.getPath(), restHandler)); // restHandler.deprecatedRoutes() - // .forEach(route -> registerAsDeprecatedHandler(route.getMethod(), route.getPath(), restHandler, route.getDeprecationMessage())); + // .forEach(route -> registerAsDeprecatedHandler(route.getMethod(), route.getPath(), restHandler, route.getDeprecationMessage())); // restHandler.replacedRoutes() - // .forEach( - // route -> registerWithDeprecatedHandler( - // route.getMethod(), - // route.getPath(), - // restHandler, - // route.getDeprecatedMethod(), - // route.getDeprecatedPath() - // ) - // ); + // .forEach( + // route -> registerWithDeprecatedHandler( + // route.getMethod(), + // route.getPath(), + // restHandler, + // route.getDeprecatedMethod(), + // route.getDeprecatedPath() + // ) + // ); } @Override public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { - System.out.println("Dispatching request"); try { tryAllHandlers(request, channel, threadContext); } catch (Exception e) { @@ -400,7 +397,6 @@ private void dispatchRequest(RestRequest request, RestChannel channel, RestHandl } private void dispatchProtobufRequest(RestRequest request, RestChannel channel, ProtobufRestHandler handler) throws Exception { - System.out.println("Dispatching protobuf request"); final int contentLength = request.content().length(); if (contentLength > 0) { final MediaType mediaType = request.getMediaType(); @@ -522,8 +518,6 @@ private void sendContentTypeErrorMessage(@Nullable List contentTypeHeade } private void tryAllHandlers(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) throws Exception { - System.out.println("Trying all handlers"); - System.out.println("Request: " + request.toString()); for (final RestHeaderDefinition restHeader : headersToCopy) { final String name = restHeader.getName(); final List headerValues = request.getAllHeaderValues(name); @@ -555,9 +549,6 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel final String rawPath = request.rawPath(); final String uri = request.uri(); final RestRequest.Method requestMethod; - System.out.println("raw path: " + rawPath); - System.out.println("uri: " + uri); - System.out.println("request method: " + request.method().toString()); try { // Resolves the HTTP method and fails if the method is invalid requestMethod = request.method(); @@ -566,22 +557,17 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel while (allHandlers.hasNext()) { final RestHandler handler; final ProtobufRestHandler protobufHandler; - System.out.println("All handlers has next"); final MethodHandlers handlers = allHandlers.next(); - System.out.println("Handlers: " + handlers); if (handlers == null) { - System.out.println("Handlers is null"); handler = null; protobufHandler = null; } else { if (rawPath.contains("protobuf")) { handler = null; protobufHandler = handlers.getProtobufHandler(requestMethod); - System.out.println("Protobuf handler: " + protobufHandler); } else { protobufHandler = null; handler = handlers.getHandler(requestMethod); - System.out.println("Handler: " + handler); } } 
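// Dispatch above is keyed purely off the raw path: any request whose path
// contains "protobuf" is resolved against the protobuf handler map, and
// everything else falls through to the existing RestHandler lookup. Sketching
// the two outcomes (paths illustrative only):
//
//   GET /_protobuf/cat/nodes -> handlers.getProtobufHandler(GET)
//   GET /_cat/nodes          -> handlers.getHandler(GET)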
if (handler == null && protobufHandler == null) { @@ -611,9 +597,6 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel } Iterator getAllHandlers(@Nullable Map requestParamsRef, String rawPath) { - System.out.println("Getting all handlers"); - System.out.println("Request params: " + requestParamsRef); - System.out.println("Raw path: " + rawPath); final Supplier> paramsSupplier; if (requestParamsRef == null) { paramsSupplier = () -> null; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java index 93db24d9ee7d6..ce45b1942d35a 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java @@ -17,7 +17,7 @@ import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; import org.opensearch.client.node.ProtobufNodeClient; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.ProtobufDiscoveryNodes; import org.opensearch.common.Strings; @@ -25,6 +25,7 @@ import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.http.ProtobufHttpInfo; import org.opensearch.index.cache.query.ProtobufQueryCacheStats; @@ -39,7 +40,7 @@ import org.opensearch.index.shard.ProtobufIndexingStats; import org.opensearch.indices.ProtobufNodeIndicesStats; import org.opensearch.monitor.fs.ProtobufFsInfo; -import org.opensearch.monitor.jvm.ProtobufJvmInfo; +import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.monitor.jvm.ProtobufJvmStats; import org.opensearch.monitor.os.ProtobufOsStats; import org.opensearch.monitor.process.ProtobufProcessInfo; @@ -334,16 +335,15 @@ Table buildTable( ProtobufNodesInfoResponse nodesInfo, ProtobufNodesStatsResponse nodesStats ) { - ProtobufDiscoveryNodes nodes = state.getState().nodes(); String clusterManagerId = nodes.getClusterManagerNodeId(); Table table = getTableWithHeader(req); - for (ProtobufDiscoveryNode node : nodes) { + for (DiscoveryNode node : nodes) { ProtobufNodeInfo info = nodesInfo.getNodesMap().get(node.getId()); ProtobufNodeStats stats = nodesStats.getNodesMap().get(node.getId()); - ProtobufJvmInfo jvmInfo = info == null ? null : info.getInfo(ProtobufJvmInfo.class); + JvmInfo jvmInfo = info == null ? null : info.getInfo(JvmInfo.class); ProtobufJvmStats jvmStats = stats == null ? null : stats.getJvm(); ProtobufFsInfo fsInfo = stats == null ? null : stats.getFs(); ProtobufOsStats osStats = stats == null ? 
null : stats.getOs(); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index b54c8955283a2..88ae0dd74c087 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -354,7 +354,6 @@ Table buildTable( NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats ) { - DiscoveryNodes nodes = state.getState().nodes(); String clusterManagerId = nodes.getClusterManagerNodeId(); Table table = getTableWithHeader(req); diff --git a/server/src/main/java/org/opensearch/script/ScriptCache.java b/server/src/main/java/org/opensearch/script/ScriptCache.java index 439953f816cbb..0c536bd7ec133 100644 --- a/server/src/main/java/org/opensearch/script/ScriptCache.java +++ b/server/src/main/java/org/opensearch/script/ScriptCache.java @@ -155,6 +155,14 @@ public ScriptContextStats stats(String context) { return scriptMetrics.stats(context); } + public ProtobufScriptStats protobufStats() { + return scriptMetrics.protobufStats(); + } + + public ProtobufScriptContextStats protobufStats(String context) { + return scriptMetrics.protobufStats(context); + } + /** * Check whether there have been too many compilations within the last minute, throwing a circuit breaking exception if so. * This is a variant of the token bucket algorithm: https://en.wikipedia.org/wiki/Token_bucket diff --git a/server/src/main/java/org/opensearch/script/ScriptMetrics.java b/server/src/main/java/org/opensearch/script/ScriptMetrics.java index fd54c757fd017..aa348ff5948c9 100644 --- a/server/src/main/java/org/opensearch/script/ScriptMetrics.java +++ b/server/src/main/java/org/opensearch/script/ScriptMetrics.java @@ -63,4 +63,17 @@ public ScriptStats stats() { public ScriptContextStats stats(String context) { return new ScriptContextStats(context, compilationsMetric.count(), cacheEvictionsMetric.count(), compilationLimitTriggered.count()); } + + public ProtobufScriptStats protobufStats() { + return new ProtobufScriptStats(compilationsMetric.count(), cacheEvictionsMetric.count(), compilationLimitTriggered.count()); + } + + public ProtobufScriptContextStats protobufStats(String context) { + return new ProtobufScriptContextStats( + context, + compilationsMetric.count(), + cacheEvictionsMetric.count(), + compilationLimitTriggered.count() + ); + } } diff --git a/server/src/main/java/org/opensearch/script/ScriptService.java b/server/src/main/java/org/opensearch/script/ScriptService.java index 11e72ee6a8b66..d34153d126646 100644 --- a/server/src/main/java/org/opensearch/script/ScriptService.java +++ b/server/src/main/java/org/opensearch/script/ScriptService.java @@ -689,6 +689,10 @@ public ScriptStats stats() { return cacheHolder.get().stats(); } + public ProtobufScriptStats protobufStats() { + return cacheHolder.get().protobufStats(); + } + public ScriptCacheStats cacheStats() { return cacheHolder.get().cacheStats(); } @@ -813,6 +817,18 @@ ScriptStats stats() { return new ScriptStats(contextStats); } + ProtobufScriptStats protobufStats() { + if (general != null) { + return general.protobufStats(); + } + List<ProtobufScriptContextStats> contextStats = new ArrayList<>(contextCache.size()); + for (Map.Entry<String, AtomicReference<ScriptCache>> entry : contextCache.entrySet()) { + ScriptCache cache = entry.getValue().get(); + contextStats.add(cache.protobufStats(entry.getKey())); + } + return new ProtobufScriptStats(contextStats); + } + ScriptCacheStats cacheStats() { if (general != null) { return new 
ScriptCacheStats(general.stats()); diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java index eba64998014e2..cdcbe8d2fba61 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.node.ProtobufReportingService; import org.opensearch.node.ReportingService; import java.util.HashMap; @@ -43,9 +44,10 @@ * * @opensearch.internal */ -public class AggregationUsageService implements ReportingService<AggregationInfo> { +public class AggregationUsageService implements ReportingService<AggregationInfo>, ProtobufReportingService<ProtobufAggregationInfo> { private final Map<String, Map<String, LongAdder>> aggs; private final AggregationInfo info; + private final ProtobufAggregationInfo protobufInfo; public static final String OTHER_SUBTYPE = "other"; @@ -82,6 +84,7 @@ public AggregationUsageService build() { private AggregationUsageService(Builder builder) { this.aggs = builder.aggs; info = new AggregationInfo(aggs); + protobufInfo = new ProtobufAggregationInfo(aggs); } public void incAggregationUsage(String aggregationName, String valuesSourceType) { @@ -118,4 +121,9 @@ public Map<String, Object> getUsageStats() { public AggregationInfo info() { return info; } + + @Override + public ProtobufAggregationInfo protobufInfo() { + return protobufInfo; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java b/server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java index cb90457671506..d9dcc049fcf5d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ProtobufAggregationInfo.java @@ -10,6 +10,8 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; + +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ProtobufReportingService; import java.io.IOException; @@ -68,4 +70,20 @@ public void writeTo(CodedOutputStream out) throws IOException { public Map<String, Set<String>> getAggregations() { return aggs; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("aggregations"); + for (Map.Entry<String, Set<String>> e : aggs.entrySet()) { + builder.startObject(e.getKey()); + builder.startArray("types"); + for (String s : e.getValue()) { + builder.value(s); + } + builder.endArray(); + builder.endObject(); + } + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java b/server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java index 245eaa31b7bcc..d95845f1eea77 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java +++ b/server/src/main/java/org/opensearch/search/pipeline/ProtobufSearchPipelineInfo.java @@ -10,6 +10,8 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; + +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.ingest.ProtobufProcessorInfo; import org.opensearch.node.ProtobufReportingService; @@ -53,4 +55,16 @@ public void writeTo(CodedOutputStream out) throws IOException { public boolean 
containsProcessor(String type) { return processors.contains(new ProtobufProcessorInfo(type)); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("search_pipelines"); + builder.startArray("processors"); + for (ProtobufProcessorInfo info : processors) { + info.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 1066d836e5183..6984f774f3650 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -41,6 +41,8 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.ProtobufProcessorInfo; +import org.opensearch.node.ProtobufReportingService; import org.opensearch.node.ReportingService; import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.script.ScriptService; @@ -63,7 +65,11 @@ * The main entry point for search pipelines. Handles CRUD operations and exposes the API to execute search pipelines * against requests and responses. */ -public class SearchPipelineService implements ClusterStateApplier, ReportingService<SearchPipelineInfo> { +public class SearchPipelineService + implements + ClusterStateApplier, + ReportingService<SearchPipelineInfo>, + ProtobufReportingService<ProtobufSearchPipelineInfo> { public static final String SEARCH_PIPELINE_ORIGIN = "search_pipeline"; public static final String AD_HOC_PIPELINE_ID = "_ad_hoc_pipeline"; @@ -445,6 +451,15 @@ public SearchPipelineStats stats() { return builder.build(); } + @Override + public ProtobufSearchPipelineInfo protobufInfo() { + List<ProtobufProcessorInfo> processorInfoList = new ArrayList<>(); + for (Map.Entry<String, Processor.Factory> entry : processorFactories.entrySet()) { + processorInfoList.add(new ProtobufProcessorInfo(entry.getKey())); + } + return new ProtobufSearchPipelineInfo(processorInfoList); + } + public static List<PipelineConfiguration> getPipelines(ClusterState clusterState, String... 
ids) { SearchPipelineMetadata metadata = clusterState.getMetadata().custom(SearchPipelineMetadata.TYPE); return innerGetPipelines(metadata, ids); diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java index 21165cb7736ca..779089a610d68 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java @@ -16,7 +16,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.action.ProtobufActionResponse; import org.opensearch.action.NotifyOnceListener; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; @@ -385,11 +385,11 @@ public String getHeader(String header) { return headers.get(header); } - public ProtobufTaskResult result(ProtobufDiscoveryNode node, Exception error) throws IOException { + public ProtobufTaskResult result(DiscoveryNode node, Exception error) throws IOException { return new ProtobufTaskResult(taskInfo(node.getId(), true, true), error); } - public ProtobufTaskResult result(ProtobufDiscoveryNode node, ProtobufActionResponse response) throws IOException { + public ProtobufTaskResult result(DiscoveryNode node, ProtobufActionResponse response) throws IOException { if (response instanceof ToXContent) { return new ProtobufTaskResult(taskInfo(node.getId(), true, true), (ToXContent) response); } else { diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java index 5a716bf2a4032..388a5b3d497b0 100644 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java @@ -18,7 +18,7 @@ import org.opensearch.action.StepListener; import org.opensearch.action.support.ProtobufChannelActionListener; import org.opensearch.action.support.GroupedActionListener; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ProtobufEmptyTransportResponseHandler; import org.opensearch.transport.ProtobufTransportChannel; @@ -64,7 +64,7 @@ void cancelTaskAndDescendants(ProtobufCancellableTask task, String reason, boole logger.trace("cancelling task [{}] and its descendants", taskId); StepListener<Void> completedListener = new StepListener<>(); GroupedActionListener<Void> groupedListener = new GroupedActionListener<>(ActionListener.map(completedListener, r -> null), 3); - Collection<ProtobufDiscoveryNode> childrenNodes = taskManager.startBanOnChildrenNodesProtobuf(task.getId(), () -> { + Collection<DiscoveryNode> childrenNodes = taskManager.startBanOnChildrenNodesProtobuf(task.getId(), () -> { logger.trace("child tasks of parent [{}] are completed", taskId); groupedListener.onResponse(null); }); @@ -104,7 +104,7 @@ private void setBanOnNodes( String reason, boolean waitForCompletion, ProtobufCancellableTask task, - Collection<ProtobufDiscoveryNode> childNodes, + Collection<DiscoveryNode> childNodes, ActionListener<Void> listener ) { if (childNodes.isEmpty()) { @@ -118,7 +118,7 @@ private void setBanOnNodes( childNodes.size() ); final BanParentTaskRequest banRequest = BanParentTaskRequest.createSetBanParentTaskRequest(taskId, reason, waitForCompletion); - for (ProtobufDiscoveryNode 
node : childNodes) { + for (DiscoveryNode node : childNodes) { transportService.sendRequest( node, BAN_PARENT_ACTION_NAME, @@ -141,11 +141,11 @@ public void handleException(ProtobufTransportException exp) { } } - private void removeBanOnNodes(ProtobufCancellableTask task, Collection childNodes) { + private void removeBanOnNodes(ProtobufCancellableTask task, Collection childNodes) { final BanParentTaskRequest request = BanParentTaskRequest.createRemoveBanParentTaskRequest( new ProtobufTaskId(localNodeId(), task.getId()) ); - for (ProtobufDiscoveryNode node : childNodes) { + for (DiscoveryNode node : childNodes) { logger.trace("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node); transportService.sendRequest( node, diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java deleted file mode 100644 index b53958eb290d3..0000000000000 --- a/server/src/main/java/org/opensearch/tasks/ProtobufTaskManager.java +++ /dev/null @@ -1,763 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. -*/ - -package org.opensearch.tasks; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectIntMap; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.core.Assertions; -import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; -import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ProtobufActionResponse; -import org.opensearch.action.NotifyOnceListener; -import org.opensearch.cluster.ProtobufClusterChangedEvent; -import org.opensearch.cluster.ProtobufClusterStateApplier; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; -import org.opensearch.cluster.node.ProtobufDiscoveryNodes; -import org.opensearch.common.SetOnce; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.ConcurrentMapLong; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TcpChannel; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; - -import static org.opensearch.common.unit.TimeValue.timeValueMillis; -import static 
org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; - -/** - * ProtobufTask Manager service for keeping track of currently running tasks on the nodes -* -* @opensearch.internal -*/ -public class ProtobufTaskManager implements ProtobufClusterStateApplier { - - private static final Logger logger = LogManager.getLogger(ProtobufTaskManager.class); - - private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); - - public static final String TASK_RESOURCE_CONSUMERS_ATTRIBUTES = "task_resource_consumers.enabled"; - - public static final Setting<Boolean> TASK_RESOURCE_CONSUMERS_ENABLED = Setting.boolSetting( - TASK_RESOURCE_CONSUMERS_ATTRIBUTES, - false, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Rest headers that are copied to the task - */ - private final List<String> taskHeaders; - private final ThreadPool threadPool; - - private final ConcurrentMapLong<ProtobufTask> tasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - - private final ConcurrentMapLong<CancellableTaskHolder> cancellableTasks = ConcurrentCollections - .newConcurrentMapLongWithAggressiveConcurrency(); - - private final AtomicLong taskIdGenerator = new AtomicLong(); - - private final Map<ProtobufTaskId, String> banedParents = new ConcurrentHashMap<>(); - - private TaskResultsService taskResultsService; - private final SetOnce<ProtobufTaskResourceTrackingService> taskResourceTrackingService = new SetOnce<>(); - - private volatile ProtobufDiscoveryNodes lastDiscoveryNodes = ProtobufDiscoveryNodes.EMPTY_NODES; - - private final ByteSizeValue maxHeaderSize; - private final Map<TcpChannel, ChannelPendingTaskTracker> channelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); - private final SetOnce<ProtobufTaskCancellationService> cancellationService = new SetOnce<>(); - - private volatile boolean taskResourceConsumersEnabled; - private final Set<Consumer<ProtobufTask>> taskResourceConsumer; - - public static ProtobufTaskManager createTaskManagerWithClusterSettings( - Settings settings, - ClusterSettings clusterSettings, - ThreadPool threadPool, - Set<String> taskHeaders - ) { - final ProtobufTaskManager taskManager = new ProtobufTaskManager(settings, threadPool, taskHeaders); - clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_CONSUMERS_ENABLED, taskManager::setTaskResourceConsumersEnabled); - return taskManager; - } - - public ProtobufTaskManager(Settings settings, ThreadPool threadPool, Set<String> taskHeaders) { - this.threadPool = threadPool; - this.taskHeaders = new ArrayList<>(taskHeaders); - this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); - this.taskResourceConsumersEnabled = TASK_RESOURCE_CONSUMERS_ENABLED.get(settings); - taskResourceConsumer = new HashSet<>(); - } - - public void registerTaskResourceConsumer(Consumer<ProtobufTask> consumer) { - taskResourceConsumer.add(consumer); - } - - public void setTaskResultsService(TaskResultsService taskResultsService) { - assert this.taskResultsService == null; - this.taskResultsService = taskResultsService; - } - - public void setTaskCancellationService(ProtobufTaskCancellationService taskCancellationService) { - this.cancellationService.set(taskCancellationService); - } - - public void setTaskResourceTrackingService(ProtobufTaskResourceTrackingService taskResourceTrackingService) { - this.taskResourceTrackingService.set(taskResourceTrackingService); - } - - public void setTaskResourceConsumersEnabled(boolean taskResourceConsumersEnabled) { - this.taskResourceConsumersEnabled = taskResourceConsumersEnabled; - } - - /** - * Registers a task without parent task - */ - public ProtobufTask register(String type, String action, ProtobufTaskAwareRequest request) { - Map<String, String> headers = new 
HashMap<>(); - long headerSize = 0; - long maxSize = maxHeaderSize.getBytes(); - ThreadContext threadContext = threadPool.getThreadContext(); - for (String key : taskHeaders) { - String httpHeader = threadContext.getHeader(key); - if (httpHeader != null) { - headerSize += key.length() * 2 + httpHeader.length() * 2; - if (headerSize > maxSize) { - throw new IllegalArgumentException("Request exceeded the maximum size of task headers " + maxHeaderSize); - } - headers.put(key, httpHeader); - } - } - ProtobufTask task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask(), headers); - Objects.requireNonNull(task); - assert task.getParentTaskId().equals(request.getParentTask()) : "Request [ " + request + "] didn't preserve it parentTaskId"; - if (logger.isTraceEnabled()) { - logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription()); - } - - if (task.supportsResourceTracking()) { - boolean success = task.addResourceTrackingCompletionListener(new NotifyOnceListener<>() { - @Override - protected void innerOnResponse(ProtobufTask task) { - // Stop tracking the task once the last thread has been marked inactive. - if (taskResourceTrackingService.get() != null && task.supportsResourceTracking()) { - taskResourceTrackingService.get().stopTracking(task); - } - } - - @Override - protected void innerOnFailure(Exception e) { - ExceptionsHelper.reThrowIfNotNull(e); - } - }); - - if (success == false) { - logger.debug( - "failed to register a completion listener as task resource tracking has already completed [taskId={}]", - task.getId() - ); - } - } - - if (task instanceof ProtobufCancellableTask) { - registerCancellableTask(task); - } else { - ProtobufTask previousTask = tasks.put(task.getId(), task); - assert previousTask == null; - } - return task; - } - - private void registerCancellableTask(ProtobufTask task) { - ProtobufCancellableTask cancellableTask = (ProtobufCancellableTask) task; - CancellableTaskHolder holder = new CancellableTaskHolder(cancellableTask); - CancellableTaskHolder oldHolder = cancellableTasks.put(task.getId(), holder); - assert oldHolder == null; - // Check if this task was banned before we start it. The empty check is used to avoid - // computing the hash code of the parent taskId as most of the time banedParents is empty. - if (task.getParentTaskId().isSet() && banedParents.isEmpty() == false) { - String reason = banedParents.get(task.getParentTaskId()); - if (reason != null) { - try { - holder.cancel(reason); - throw new TaskCancelledException("ProtobufTask cancelled before it started: " + reason); - } finally { - // let's clean up the registration - unregister(task); - } - } - } - } - - /** - * Cancels a task - *

- * After starting cancellation on the parent task, the task manager tries to cancel all children tasks - * of the current task. Once cancellation of the children tasks is done, the listener is triggered. - * If the task is completed or unregistered from ProtobufTaskManager, then the listener is called immediately. - */ - public void cancel(ProtobufCancellableTask task, String reason, Runnable listener) { - CancellableTaskHolder holder = cancellableTasks.get(task.getId()); - if (holder != null) { - logger.trace("cancelling task with id {}", task.getId()); - holder.cancel(reason, listener); - } else { - listener.run(); - } - } - - /** - * Unregister the task - */ - public ProtobufTask unregister(ProtobufTask task) { - logger.trace("unregister task for id: {}", task.getId()); - - // Decrement the task's self-thread as part of unregistration. - task.decrementResourceTrackingThreads(); - - if (taskResourceConsumersEnabled) { - for (Consumer taskConsumer : taskResourceConsumer) { - try { - taskConsumer.accept(task); - } catch (Exception e) { - logger.error("error encountered when updating the consumer", e); - } - } - } - - if (task instanceof ProtobufCancellableTask) { - CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); - if (holder != null) { - holder.finish(); - return holder.getTask(); - } else { - return null; - } - } else { - return tasks.remove(task.getId()); - } - } - - /** - * Register a node on which a child task will execute. The returned {@link Releasable} must be called - * to unregister the child node once the child task is completed or failed. - */ - public Releasable registerChildNode(long taskId, ProtobufDiscoveryNode node) { - final CancellableTaskHolder holder = cancellableTasks.get(taskId); - if (holder != null) { - logger.trace("register child node [{}] task [{}]", node, taskId); - holder.registerChildNode(node); - return Releasables.releaseOnce(() -> { - logger.trace("unregister child node [{}] task [{}]", node, taskId); - holder.unregisterChildNode(node); - }); - } - return () -> {}; - } - - public ProtobufDiscoveryNode localNode() { - return lastDiscoveryNodes.getLocalNode(); - } - - /** - * Stores the task failure - */ - public void storeResult( - ProtobufTask task, - Exception error, - ActionListener listener - ) { - ProtobufDiscoveryNode localNode = lastDiscoveryNodes.getLocalNode(); - if (localNode == null) { - // too early to store anything, shouldn't really be here - just pass the error along - listener.onFailure(error); - return; - } - final ProtobufTaskResult taskResult; - try { - taskResult = task.result(localNode, error); - } catch (IOException ex) { - logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); - listener.onFailure(ex); - return; - } - taskResultsService.storeResult(taskResult, new ActionListener() { - @Override - public void onResponse(Void aVoid) { - listener.onFailure(error); - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); - listener.onFailure(e); - } - }); - } - - /** - * Stores the task result - */ - public void storeResult( - ProtobufTask task, - Response response, - ActionListener listener - ) { - ProtobufDiscoveryNode localNode = lastDiscoveryNodes.getLocalNode(); - if (localNode == null) { - // too early to store anything, shouldn't really be here - just pass the response along - logger.warn("couldn't store response {}, the node didn't 
join the cluster yet", response); - listener.onResponse(response); - return; - } - final ProtobufTaskResult taskResult; - try { - taskResult = task.result(localNode, response); - } catch (IOException ex) { - logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex); - listener.onFailure(ex); - return; - } - - taskResultsService.storeResult(taskResult, new ActionListener() { - @Override - public void onResponse(Void aVoid) { - listener.onResponse(response); - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), e); - listener.onFailure(e); - } - }); - } - - /** - * Returns the list of currently running tasks on the node - */ - public Map getTasks() { - HashMap taskHashMap = new HashMap<>(this.tasks); - for (CancellableTaskHolder holder : cancellableTasks.values()) { - taskHashMap.put(holder.getTask().getId(), holder.getTask()); - } - return Collections.unmodifiableMap(taskHashMap); - } - - /** - * Returns the list of currently running tasks on the node that can be cancelled - */ - public Map getCancellableTasks() { - HashMap taskHashMap = new HashMap<>(); - for (CancellableTaskHolder holder : cancellableTasks.values()) { - taskHashMap.put(holder.getTask().getId(), holder.getTask()); - } - return Collections.unmodifiableMap(taskHashMap); - } - - /** - * Returns a task with given id, or null if the task is not found. - */ - public ProtobufTask getTask(long id) { - ProtobufTask task = tasks.get(id); - if (task != null) { - return task; - } else { - return getCancellableTask(id); - } - } - - /** - * Returns a cancellable task with given id, or null if the task is not found. - */ - public ProtobufCancellableTask getCancellableTask(long id) { - CancellableTaskHolder holder = cancellableTasks.get(id); - if (holder != null) { - return holder.getTask(); - } else { - return null; - } - } - - /** - * Returns the number of currently banned tasks. - *
<p>
- * Will be used in task manager stats and for debugging. - */ - public int getBanCount() { - return banedParents.size(); - } - - /** - * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. - *
<p>
- * This method is called when a parent task that has children is cancelled. - * - * @return a list of pending cancellable child tasks - */ - public List setBan(ProtobufTaskId parentTaskId, String reason) { - logger.trace("setting ban for the parent task {} {}", parentTaskId, reason); - - // Set the ban first, so the newly created tasks cannot be registered - synchronized (banedParents) { - if (lastDiscoveryNodes.nodeExists(parentTaskId.getNodeId())) { - // Only set the ban if the node is the part of the cluster - banedParents.put(parentTaskId, reason); - } - } - return cancellableTasks.values().stream().filter(t -> t.hasParent(parentTaskId)).map(t -> t.task).collect(Collectors.toList()); - } - - /** - * Removes the ban for the specified parent task. - *
<p>
- * This method is called when a previously banned task finally cancelled - */ - public void removeBan(ProtobufTaskId parentTaskId) { - logger.trace("removing ban for the parent task {}", parentTaskId); - banedParents.remove(parentTaskId); - } - - // for testing - public Set getBannedTaskIds() { - return Collections.unmodifiableSet(banedParents.keySet()); - } - - /** - * Start rejecting new child requests as the parent task was cancelled. - * - * @param taskId the parent task id - * @param onChildTasksCompleted called when all child tasks are completed or failed - * @return the set of current nodes that have outstanding child tasks - */ - public Collection startBanOnChildrenNodes(long taskId, Runnable onChildTasksCompleted) { - final CancellableTaskHolder holder = cancellableTasks.get(taskId); - if (holder != null) { - return holder.startBan(onChildTasksCompleted); - } else { - onChildTasksCompleted.run(); - return Collections.emptySet(); - } - } - - @Override - public void applyProtobufClusterState(ProtobufClusterChangedEvent event) { - lastDiscoveryNodes = event.state().getNodes(); - if (event.nodesRemoved()) { - synchronized (banedParents) { - lastDiscoveryNodes = event.state().getNodes(); - // Remove all bans that were registered by nodes that are no longer in the cluster state - Iterator banIterator = banedParents.keySet().iterator(); - while (banIterator.hasNext()) { - ProtobufTaskId taskId = banIterator.next(); - if (lastDiscoveryNodes.nodeExists(taskId.getNodeId()) == false) { - logger.debug( - "Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", - taskId, - event.state().getNodes().getLocalNode() - ); - banIterator.remove(); - } - } - } - } - } - - /** - * Blocks the calling thread, waiting for the task to vanish from the ProtobufTaskManager. - */ - public void waitForTaskCompletion(ProtobufTask task, long untilInNanos) { - while (System.nanoTime() - untilInNanos < 0) { - if (getTask(task.getId()) == null) { - return; - } - try { - Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); - } catch (InterruptedException e) { - throw new OpenSearchException("Interrupted waiting for completion of [{}]", e, task); - } - } - throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); - } - - /** - * Takes actions when a task is registered and its execution starts - * - * @param task getting executed. 
- * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns - */ - public ThreadContext.StoredContext taskExecutionStarted(ProtobufTask task) { - if (taskResourceTrackingService.get() == null) return () -> {}; - - return taskResourceTrackingService.get().startTracking(task); - } - - private static class CancellableTaskHolder { - private final ProtobufCancellableTask task; - private boolean finished = false; - private List cancellationListeners = null; - private ObjectIntMap childTasksPerNode = null; - private boolean banChildren = false; - private List childTaskCompletedListeners = null; - - CancellableTaskHolder(ProtobufCancellableTask task) { - this.task = task; - } - - void cancel(String reason, Runnable listener) { - final Runnable toRun; - synchronized (this) { - if (finished) { - assert cancellationListeners == null; - toRun = listener; - } else { - toRun = () -> {}; - if (listener != null) { - if (cancellationListeners == null) { - cancellationListeners = new ArrayList<>(); - } - cancellationListeners.add(listener); - } - } - } - try { - task.cancel(reason); - } finally { - if (toRun != null) { - toRun.run(); - } - } - } - - void cancel(String reason) { - task.cancel(reason); - } - - /** - * Marks task as finished. - */ - public void finish() { - final List listeners; - synchronized (this) { - this.finished = true; - if (cancellationListeners != null) { - listeners = cancellationListeners; - cancellationListeners = null; - } else { - listeners = Collections.emptyList(); - } - } - // We need to call the listener outside of the synchronised section to avoid potential bottle necks - // in the listener synchronization - notifyListeners(listeners); - } - - private void notifyListeners(List listeners) { - assert Thread.holdsLock(this) == false; - Exception rootException = null; - for (Runnable listener : listeners) { - try { - listener.run(); - } catch (RuntimeException inner) { - rootException = ExceptionsHelper.useOrSuppress(rootException, inner); - } - } - ExceptionsHelper.reThrowIfNotNull(rootException); - } - - public boolean hasParent(ProtobufTaskId parentTaskId) { - return task.getParentTaskId().equals(parentTaskId); - } - - public ProtobufCancellableTask getTask() { - return task; - } - - synchronized void registerChildNode(ProtobufDiscoveryNode node) { - if (banChildren) { - throw new TaskCancelledException("The parent task was cancelled, shouldn't start any child tasks"); - } - if (childTasksPerNode == null) { - childTasksPerNode = new ObjectIntHashMap<>(); - } - childTasksPerNode.addTo(node, 1); - } - - void unregisterChildNode(ProtobufDiscoveryNode node) { - final List listeners; - synchronized (this) { - if (childTasksPerNode.addTo(node, -1) == 0) { - childTasksPerNode.remove(node); - } - if (childTasksPerNode.isEmpty() && this.childTaskCompletedListeners != null) { - listeners = childTaskCompletedListeners; - childTaskCompletedListeners = null; - } else { - listeners = Collections.emptyList(); - } - } - notifyListeners(listeners); - } - - Set startBan(Runnable onChildTasksCompleted) { - final Set pendingChildNodes; - final Runnable toRun; - synchronized (this) { - banChildren = true; - if (childTasksPerNode == null) { - pendingChildNodes = Collections.emptySet(); - } else { - pendingChildNodes = StreamSupport.stream(childTasksPerNode.spliterator(), false) - .map(e -> e.key) - .collect(Collectors.toSet()); - } - if (pendingChildNodes.isEmpty()) { - assert childTaskCompletedListeners == null; - toRun = onChildTasksCompleted; - } 
else { - toRun = () -> {}; - if (childTaskCompletedListeners == null) { - childTaskCompletedListeners = new ArrayList<>(); - } - childTaskCompletedListeners.add(onChildTasksCompleted); - } - } - toRun.run(); - return pendingChildNodes; - } - } - - /** - * Start tracking a cancellable task with its tcp channel, so if the channel gets closed we can get a set of - * pending tasks associated that channel and cancel them as these results won't be retrieved by the parent task. - * - * @return a releasable that should be called when this pending task is completed - */ - public Releasable startTrackingCancellableChannelTask(TcpChannel channel, ProtobufCancellableTask task) { - assert cancellableTasks.containsKey(task.getId()) : "task [" + task.getId() + "] is not registered yet"; - final ChannelPendingTaskTracker tracker = channelPendingTaskTrackers.compute(channel, (k, curr) -> { - if (curr == null) { - curr = new ChannelPendingTaskTracker(); - } - curr.addTask(task); - return curr; - }); - if (tracker.registered.compareAndSet(false, true)) { - channel.addCloseListener(ActionListener.wrap(r -> { - final ChannelPendingTaskTracker removedTracker = channelPendingTaskTrackers.remove(channel); - assert removedTracker == tracker; - cancelTasksOnChannelClosed(tracker.drainTasks()); - }, e -> { assert false : new AssertionError("must not be here", e); })); - } - return () -> tracker.removeTask(task); - } - - // for testing - final int numberOfChannelPendingTaskTrackers() { - return channelPendingTaskTrackers.size(); - } - - private static class ChannelPendingTaskTracker { - final AtomicBoolean registered = new AtomicBoolean(); - final Semaphore permits = Assertions.ENABLED ? new Semaphore(Integer.MAX_VALUE) : null; - final Set pendingTasks = ConcurrentCollections.newConcurrentSet(); - - void addTask(ProtobufCancellableTask task) { - assert permits.tryAcquire() : "tracker was drained"; - final boolean added = pendingTasks.add(task); - assert added : "task " + task.getId() + " is in the pending list already"; - assert releasePermit(); - } - - boolean acquireAllPermits() { - permits.acquireUninterruptibly(Integer.MAX_VALUE); - return true; - } - - boolean releasePermit() { - permits.release(); - return true; - } - - Set drainTasks() { - assert acquireAllPermits(); // do not release permits so we can't add tasks to this tracker after draining - return Collections.unmodifiableSet(pendingTasks); - } - - void removeTask(ProtobufCancellableTask task) { - final boolean removed = pendingTasks.remove(task); - assert removed : "task " + task.getId() + " is not in the pending list"; - } - } - - private void cancelTasksOnChannelClosed(Set tasks) { - if (tasks.isEmpty() == false) { - threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.warn("failed to cancel tasks on channel closed", e); - } - - @Override - protected void doRun() { - for (ProtobufCancellableTask task : tasks) { - cancelTaskAndDescendants(task, "channel was closed", false, ActionListener.wrap(() -> {})); - } - } - }); - } - } - - public void cancelTaskAndDescendants( - ProtobufCancellableTask task, - String reason, - boolean waitForCompletion, - ActionListener listener - ) { - final ProtobufTaskCancellationService service = cancellationService.get(); - if (service != null) { - service.cancelTaskAndDescendants(task, reason, waitForCompletion, listener); - } else { - assert false : "ProtobufTaskCancellationService is not initialized"; - throw new 
IllegalStateException("ProtobufTaskCancellationService is not initialized"); - } - } -} diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index 484ca84501be9..c3bcb316d16e9 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -49,7 +49,7 @@ import org.opensearch.cluster.ProtobufClusterStateApplier; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.ProtobufDiscoveryNodes; import org.opensearch.common.SetOnce; import org.opensearch.common.settings.ClusterSettings; @@ -139,7 +139,8 @@ public class TaskManager implements ClusterStateApplier, ProtobufClusterStateApp private final ByteSizeValue maxHeaderSize; private final Map channelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); - private final Map protobufChannelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); + private final Map protobufChannelPendingTaskTrackers = ConcurrentCollections + .newConcurrentMap(); private final SetOnce cancellationService = new SetOnce<>(); private final SetOnce protobufCancellationService = new SetOnce<>(); @@ -543,7 +544,7 @@ public DiscoveryNode localNode() { * Register a node on which a child task will execute. The returned {@link Releasable} must be called * to unregister the child node once the child task is completed or failed. */ - public Releasable registerProtobufChildNode(long taskId, ProtobufDiscoveryNode node) { + public Releasable registerProtobufChildNode(long taskId, DiscoveryNode node) { final ProtobufCancellableTaskHolder holder = protobufCancellableTasks.get(taskId); if (holder != null) { logger.trace("register child node [{}] task [{}]", node, taskId); @@ -556,7 +557,7 @@ public Releasable registerProtobufChildNode(long taskId, ProtobufDiscoveryNode n return () -> {}; } - public ProtobufDiscoveryNode localProtobufNode() { + public DiscoveryNode localProtobufNode() { return lastDiscoveryNodesProtobuf.getLocalNode(); } @@ -600,7 +601,7 @@ public void storeResultProtobuf( Exception error, ActionListener listener ) { - ProtobufDiscoveryNode localNode = lastDiscoveryNodesProtobuf.getLocalNode(); + DiscoveryNode localNode = lastDiscoveryNodesProtobuf.getLocalNode(); if (localNode == null) { // too early to store anything, shouldn't really be here - just pass the error along listener.onFailure(error); @@ -670,7 +671,7 @@ public void storeResultProtobuf( Response response, ActionListener listener ) { - ProtobufDiscoveryNode localNode = lastDiscoveryNodesProtobuf.getLocalNode(); + DiscoveryNode localNode = lastDiscoveryNodesProtobuf.getLocalNode(); if (localNode == null) { // too early to store anything, shouldn't really be here - just pass the response along logger.warn("couldn't store response {}, the node didn't join the cluster yet", response); @@ -853,11 +854,11 @@ public Collection startBanOnChildrenNodes(long taskId, Runnable o } } - /** - * Returns the number of currently banned tasks. - *
<p>
- * Will be used in task manager stats and for debugging. - */ + /** + * Returns the number of currently banned tasks. + *
<p>
+ * Will be used in task manager stats and for debugging. + */ public int getBanCountProtobuf() { return banedParentsProtobuf.size(); } @@ -879,7 +880,11 @@ public List setBanProtobuf(ProtobufTaskId parentTaskId, banedParentsProtobuf.put(parentTaskId, reason); } } - return protobufCancellableTasks.values().stream().filter(t -> t.hasParent(parentTaskId)).map(t -> t.task).collect(Collectors.toList()); + return protobufCancellableTasks.values() + .stream() + .filter(t -> t.hasParent(parentTaskId)) + .map(t -> t.task) + .collect(Collectors.toList()); } /** @@ -904,7 +909,7 @@ public Set getBannedTaskIdsProtobuf() { * @param onChildTasksCompleted called when all child tasks are completed or failed * @return the set of current nodes that have outstanding child tasks */ - public Collection startBanOnChildrenNodesProtobuf(long taskId, Runnable onChildTasksCompleted) { + public Collection startBanOnChildrenNodesProtobuf(long taskId, Runnable onChildTasksCompleted) { final ProtobufCancellableTaskHolder holder = protobufCancellableTasks.get(taskId); if (holder != null) { return holder.startBan(onChildTasksCompleted); @@ -1288,7 +1293,7 @@ private static class ProtobufCancellableTaskHolder { private final ProtobufCancellableTask task; private boolean finished = false; private List cancellationListeners = null; - private ObjectIntMap childTasksPerNode = null; + private ObjectIntMap childTasksPerNode = null; private boolean banChildren = false; private List childTaskCompletedListeners = null; @@ -1365,7 +1370,7 @@ public ProtobufCancellableTask getTask() { return task; } - synchronized void registerChildNode(ProtobufDiscoveryNode node) { + synchronized void registerChildNode(DiscoveryNode node) { if (banChildren) { throw new TaskCancelledException("The parent task was cancelled, shouldn't start any child tasks"); } @@ -1375,7 +1380,7 @@ synchronized void registerChildNode(ProtobufDiscoveryNode node) { childTasksPerNode.addTo(node, 1); } - void unregisterChildNode(ProtobufDiscoveryNode node) { + void unregisterChildNode(DiscoveryNode node) { final List listeners; synchronized (this) { if (childTasksPerNode.addTo(node, -1) == 0) { @@ -1391,8 +1396,8 @@ void unregisterChildNode(ProtobufDiscoveryNode node) { notifyListeners(listeners); } - Set startBan(Runnable onChildTasksCompleted) { - final Set pendingChildNodes; + Set startBan(Runnable onChildTasksCompleted) { + final Set pendingChildNodes; final Runnable toRun; synchronized (this) { banChildren = true; @@ -1416,6 +1421,7 @@ Set startBan(Runnable onChildTasksCompleted) { return pendingChildNodes; } } + /** * Start tracking a cancellable task with its tcp channel, so if the channel gets closed we can get a set of * pending tasks associated that channel and cancel them as these results won't be retrieved by the parent task. 
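The ban-and-cancel sequence implemented above is easier to follow as caller code. The sketch below is illustrative only and is not part of this patch: it uses the method names from the removed ProtobufTaskManager shown earlier (the merged TaskManager exposes the same flow through its *Protobuf variants such as setBanProtobuf), the ProtobufTaskId constructor is assumed to take a node id and a task id by analogy with TaskId, and in the real code path this sequencing is driven by ProtobufTaskCancellationService rather than written by hand.

    import java.util.List;

    // Illustrative sketch only -- not code from this patch.
    class CancellationFlowSketch {

        void cancelWithChildren(ProtobufTaskManager taskManager, ProtobufCancellableTask parent) {
            // Assumption: ProtobufTaskId takes (node id, task id) of the task
            // being cancelled, mirroring TaskId.
            ProtobufTaskId parentId = new ProtobufTaskId(taskManager.localNode().getId(), parent.getId());

            // Set the ban first so newly created child tasks cannot register,
            // and collect the children that are already running.
            List<ProtobufCancellableTask> running = taskManager.setBan(parentId, "parent was cancelled");

            // Cancel the parent; the Runnable fires once cancellation of the
            // child tasks is done, or immediately if the task already finished.
            taskManager.cancel(parent, "parent was cancelled", () -> {});

            // Cancel each already-running child the same way.
            for (ProtobufCancellableTask child : running) {
                taskManager.cancel(child, "parent was cancelled", () -> {});
            }

            // Once the previously banned parent has finished, lift the ban.
            taskManager.removeBan(parentId);
        }
    }

Note the ordering: the ban is installed before any cancellation is delivered, which is what makes the "shouldn't start any child tasks" guard in registerChildNode race-free.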
diff --git a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java index c9e3a51bb077c..0addbd9276b22 100644 --- a/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java +++ b/server/src/main/java/org/opensearch/threadpool/ProtobufThreadPoolInfo.java @@ -13,6 +13,7 @@ import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ProtobufReportingService; import java.io.IOException; @@ -48,4 +49,18 @@ public void writeTo(CodedOutputStream out) throws IOException { public Iterator iterator() { return infos.iterator(); } + + static final class Fields { + static final String THREAD_POOL = "thread_pool"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.THREAD_POOL); + for (ThreadPool.Info info : infos) { + info.toXContent(builder, params); + } + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index fa94ec0156090..e55bfc4e5fb50 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -64,7 +64,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index bb04f149d39a9..8632ad7102bd7 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -70,6 +70,7 @@ public class InboundHandler { private final Transport.RequestHandlers requestHandlers; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; + private volatile ProtobufTransportMessageListener protobufMessageListener = ProtobufTransportMessageListener.NOOP_LISTENER; private volatile long slowLogThresholdMs = Long.MAX_VALUE; @@ -99,6 +100,14 @@ void setMessageListener(TransportMessageListener listener) { } } + void setProtobufMessageListener(ProtobufTransportMessageListener listener) { + if (protobufMessageListener == ProtobufTransportMessageListener.NOOP_LISTENER) { + protobufMessageListener = listener; + } else { + throw new IllegalStateException("Cannot set message listener twice"); + } + } + void setSlowLogThreshold(TimeValue slowLogThreshold) { this.slowLogThresholdMs = slowLogThreshold.getMillis(); } diff --git a/server/src/main/java/org/opensearch/transport/OutboundHandler.java b/server/src/main/java/org/opensearch/transport/OutboundHandler.java index 3e493267242fb..a77dad228a626 100644 --- a/server/src/main/java/org/opensearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/OutboundHandler.java @@ -71,6 +71,7 @@ final class OutboundHandler { private final ThreadPool threadPool; private final BigArrays bigArrays; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; + private volatile ProtobufTransportMessageListener protobufMessageListener = ProtobufTransportMessageListener.NOOP_LISTENER; 
OutboundHandler( String nodeName, @@ -212,6 +213,14 @@ void setMessageListener(TransportMessageListener listener) { } } + void setProtobufMessageListener(ProtobufTransportMessageListener listener) { + if (protobufMessageListener == ProtobufTransportMessageListener.NOOP_LISTENER) { + protobufMessageListener = listener; + } else { + throw new IllegalStateException("Cannot set message listener twice"); + } + } + /** * Internal message serializer * diff --git a/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java index 377451f06892d..42d5046a9be4c 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufClusterConnectionManager.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.util.concurrent.ConcurrentCollections; @@ -39,16 +39,14 @@ public class ProtobufClusterConnectionManager implements ProtobufConnectionManag private static final Logger logger = LogManager.getLogger(ProtobufClusterConnectionManager.class); - private final ConcurrentMap connectedNodes = ConcurrentCollections - .newConcurrentMap(); - private final ConcurrentMap> pendingConnections = ConcurrentCollections - .newConcurrentMap(); + private final ConcurrentMap connectedNodes = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap> pendingConnections = ConcurrentCollections.newConcurrentMap(); private final AbstractRefCounted connectingRefCounter = new AbstractRefCounted("connection manager") { @Override protected void closeInternal() { - Iterator> iterator = connectedNodes.entrySet().iterator(); + Iterator> iterator = connectedNodes.entrySet().iterator(); while (iterator.hasNext()) { - Map.Entry next = iterator.next(); + Map.Entry next = iterator.next(); try { IOUtils.closeWhileHandlingException(next.getValue()); } finally { @@ -85,7 +83,7 @@ public void removeListener(ProtobufTransportConnectionListener listener) { @Override public void openConnection( - ProtobufDiscoveryNode node, + DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ActionListener listener ) { @@ -100,7 +98,7 @@ public void openConnection( */ @Override public void connectToNode( - ProtobufDiscoveryNode node, + DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ConnectionValidator connectionValidator, ActionListener listener @@ -180,10 +178,10 @@ public void connectToNode( * maintained by this connection manager * * @throws ProtobufNodeNotConnectedException if the node is not connected - * @see #connectToNode(ProtobufDiscoveryNode, ProtobufConnectionProfile, ConnectionValidator, ActionListener) + * @see #connectToNode(DiscoveryNode, ProtobufConnectionProfile, ConnectionValidator, ActionListener) */ @Override - public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) { + public Transport.ProtobufConnection getConnection(DiscoveryNode node) { Transport.ProtobufConnection connection = connectedNodes.get(node); if (connection == null) { throw new ProtobufNodeNotConnectedException(node, "Node not connected"); @@ -195,7 +193,7 @@ public 
Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) { * Returns {@code true} if the node is connected. */ @Override - public boolean nodeConnected(ProtobufDiscoveryNode node) { + public boolean nodeConnected(DiscoveryNode node) { return connectedNodes.containsKey(node); } @@ -203,7 +201,7 @@ public boolean nodeConnected(ProtobufDiscoveryNode node) { * Disconnected from the given node, if not connected, will do nothing. */ @Override - public void disconnectFromNode(ProtobufDiscoveryNode node) { + public void disconnectFromNode(DiscoveryNode node) { Transport.ProtobufConnection nodeChannels = connectedNodes.remove(node); if (nodeChannels != null) { // if we found it and removed it we close @@ -220,7 +218,7 @@ public int size() { } @Override - public Set getAllConnectedNodes() { + public Set getAllConnectedNodes() { return Collections.unmodifiableSet(connectedNodes.keySet()); } @@ -250,7 +248,7 @@ private void internalClose(boolean waitForPendingConnections) { } private void internalOpenConnection( - ProtobufDiscoveryNode node, + DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ActionListener listener ) { @@ -268,12 +266,7 @@ private void internalOpenConnection( })); } - private void failConnectionListeners( - ProtobufDiscoveryNode node, - RunOnce releaseOnce, - Exception e, - ListenableFuture expectedListener - ) { + private void failConnectionListeners(DiscoveryNode node, RunOnce releaseOnce, Exception e, ListenableFuture expectedListener) { ListenableFuture future = pendingConnections.remove(node); releaseOnce.run(); if (future != null) { diff --git a/server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java index ebb7e37b7010b..3a8108db629bf 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufConnectTransportException.java @@ -10,7 +10,7 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufStreamOutput; @@ -23,29 +23,29 @@ */ public class ProtobufConnectTransportException extends ProtobufActionTransportException { - private final ProtobufDiscoveryNode node; + private final DiscoveryNode node; - public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg) { + public ProtobufConnectTransportException(DiscoveryNode node, String msg) { this(node, msg, null, null); } - public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg, String action) { + public ProtobufConnectTransportException(DiscoveryNode node, String msg, String action) { this(node, msg, action, null); } - public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg, Throwable cause) { + public ProtobufConnectTransportException(DiscoveryNode node, String msg, Throwable cause) { this(node, msg, null, cause); } - public ProtobufConnectTransportException(ProtobufDiscoveryNode node, String msg, String action, Throwable cause) { - super(node == null ? null : node.getName(), node == null ? null : node.getAddress(), action, msg, cause); + public ProtobufConnectTransportException(DiscoveryNode node, String msg, String action, Throwable cause) { + super(node == null ? 
null : node.getName(), node == null ? null : node.getProtobufAddress(), action, msg, cause); this.node = node; } public ProtobufConnectTransportException(CodedInputStream in) throws IOException { super(in); ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); - node = protobufStreamInput.readOptionalWriteable(ProtobufDiscoveryNode::new); + node = protobufStreamInput.readOptionalWriteable(DiscoveryNode::new); } @Override @@ -55,7 +55,7 @@ public void writeTo(CodedOutputStream out) throws IOException { protobufStreamOutput.writeOptionalWriteable(node); } - public ProtobufDiscoveryNode node() { + public DiscoveryNode node() { return node; } } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java index 05d059955d0fb..d1ed3a6812bed 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufConnectionManager.java @@ -9,7 +9,7 @@ package org.opensearch.transport; import org.opensearch.action.ActionListener; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import java.io.Closeable; import java.util.Set; @@ -27,25 +27,25 @@ public interface ProtobufConnectionManager extends Closeable { void removeListener(ProtobufTransportConnectionListener listener); void openConnection( - ProtobufDiscoveryNode node, + DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ActionListener listener ); void connectToNode( - ProtobufDiscoveryNode node, + DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ConnectionValidator connectionValidator, ActionListener listener ) throws ConnectTransportException; - Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node); + Transport.ProtobufConnection getConnection(DiscoveryNode node); - boolean nodeConnected(ProtobufDiscoveryNode node); + boolean nodeConnected(DiscoveryNode node); - void disconnectFromNode(ProtobufDiscoveryNode node); + void disconnectFromNode(DiscoveryNode node); - Set getAllConnectedNodes(); + Set getAllConnectedNodes(); int size(); @@ -76,14 +76,14 @@ final class DelegatingNodeConnectionListener implements ProtobufTransportConnect private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); @Override - public void onNodeDisconnected(ProtobufDiscoveryNode key, Transport.ProtobufConnection connection) { + public void onNodeDisconnected(DiscoveryNode key, Transport.ProtobufConnection connection) { for (ProtobufTransportConnectionListener listener : listeners) { listener.onNodeDisconnected(key, connection); } } @Override - public void onNodeConnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) { + public void onNodeConnected(DiscoveryNode node, Transport.ProtobufConnection connection) { for (ProtobufTransportConnectionListener listener : listeners) { listener.onNodeConnected(node, connection); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java b/server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java index a2e10d03bd189..db39394a4e94c 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufConnectionProfile.java @@ -8,7 +8,7 @@ package org.opensearch.transport; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import 
org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -84,14 +84,11 @@ public static ProtobufConnectionProfile buildDefaultConnectionProfile(Settings s builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); // if we are not cluster-manager eligible we don't need a dedicated channel to publish the state builder.addConnections( - ProtobufDiscoveryNode.isClusterManagerNode(settings) ? connectionsPerNodeState : 0, + DiscoveryNode.isClusterManagerNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE ); // if we are not a data-node we don't need any dedicated channels for recovery - builder.addConnections( - ProtobufDiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, - TransportRequestOptions.Type.RECOVERY - ); + builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); return builder.build(); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java b/server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java index 7a46892128218..19a2ff22dfb55 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufNodeDisconnectedException.java @@ -9,7 +9,7 @@ package org.opensearch.transport; import com.google.protobuf.CodedInputStream; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import java.io.IOException; @@ -20,7 +20,7 @@ */ public class ProtobufNodeDisconnectedException extends ProtobufConnectTransportException { - public ProtobufNodeDisconnectedException(ProtobufDiscoveryNode node, String action) { + public ProtobufNodeDisconnectedException(DiscoveryNode node, String action) { super(node, "disconnected", action, null); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java b/server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java index 9b90f26324fb3..1aa71c609f9d6 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufNodeNotConnectedException.java @@ -9,7 +9,7 @@ package org.opensearch.transport; import com.google.protobuf.CodedInputStream; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import java.io.IOException; @@ -20,7 +20,7 @@ */ public class ProtobufNodeNotConnectedException extends ProtobufConnectTransportException { - public ProtobufNodeNotConnectedException(ProtobufDiscoveryNode node, String msg) { + public ProtobufNodeNotConnectedException(DiscoveryNode node, String msg) { super(node, msg, (String) null); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java index 365bed06c42f7..63e108b044831 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufOutboundHandler.java @@ -15,7 +15,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import 
org.opensearch.action.NotifyOnceListener; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.lease.Releasable; @@ -78,7 +78,7 @@ void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener li * objects back to the caller. */ void sendRequest( - final ProtobufDiscoveryNode node, + final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, diff --git a/server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java index 52fa5f55ad1ff..584967aa2b9bb 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufProxyConnectionStrategy.java @@ -13,15 +13,16 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.cluster.ProtobufClusterName; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.ProtobufStreamInput; import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.core.xcontent.XContentBuilder; @@ -93,8 +94,8 @@ public class ProtobufProxyConnectionStrategy extends ProtobufRemoteConnectionStr private final int maxNumConnections; private final String configuredAddress; private final String configuredServerName; - private final Supplier address; - private final AtomicReference remoteClusterName = new AtomicReference<>(); + private final Supplier address; + private final AtomicReference remoteClusterName = new AtomicReference<>(); private final ProtobufConnectionManager.ConnectionValidator clusterNameValidator; ProtobufProxyConnectionStrategy( @@ -162,7 +163,7 @@ public class ProtobufProxyConnectionStrategy extends ProtobufRemoteConnectionStr Settings settings, int maxNumConnections, String configuredAddress, - Supplier address, + Supplier address, String configuredServerName ) { super(clusterAlias, transportService, connectionManager, settings); @@ -176,12 +177,12 @@ public class ProtobufProxyConnectionStrategy extends ProtobufRemoteConnectionStr actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { - ProtobufClusterName remote = resp.getClusterName(); + ClusterName remote = resp.getClusterName(); if (remoteClusterName.compareAndSet(null, remote)) { return null; } else { if (remoteClusterName.get().equals(remote) == false) { - ProtobufDiscoveryNode node = newConnection.getNode(); + DiscoveryNode node = newConnection.getNode(); throw new ProtobufConnectTransportException(node, "handshake failed. 
unexpected remote cluster name " + remote); } return null; @@ -234,7 +235,7 @@ private void performProxyConnectionProcess(ActionListener listener) { private void openConnections(ActionListener finished, int attemptNumber) { if (attemptNumber <= MAX_CONNECT_ATTEMPTS_PER_RUN) { - ProtobufTransportAddress resolved = address.get(); + TransportAddress resolved = address.get(); int remaining = maxNumConnections - connectionManager.size(); ActionListener compositeListener = new ActionListener() { @@ -270,9 +271,9 @@ public void onFailure(Exception e) { } else { attributes = Collections.singletonMap("server_name", configuredServerName); } - ProtobufDiscoveryNode node = new ProtobufDiscoveryNode( + DiscoveryNode node = new DiscoveryNode( id, - resolved, + new TransportAddress(resolved.address()), attributes, DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT.minimumCompatibilityVersion() @@ -316,8 +317,8 @@ public void onFailure(Exception e) { } } - private static ProtobufTransportAddress resolveAddress(String address) { - return new ProtobufTransportAddress(parseConfiguredAddress(address)); + private static TransportAddress resolveAddress(String address) { + return new TransportAddress(parseConfiguredAddress(address)); } /** diff --git a/server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java index 28fb5baa28b2b..704ab50753ee6 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufReceiveTimeoutTransportException.java @@ -9,7 +9,7 @@ package org.opensearch.transport; import com.google.protobuf.CodedInputStream; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import java.io.IOException; @@ -20,8 +20,8 @@ */ public class ProtobufReceiveTimeoutTransportException extends ProtobufActionTransportException { - public ProtobufReceiveTimeoutTransportException(ProtobufDiscoveryNode node, String action, String msg) { - super(node.getName(), node.getAddress(), action, msg, null); + public ProtobufReceiveTimeoutTransportException(DiscoveryNode node, String action, String msg) { + super(node.getName(), node.getProtobufAddress(), action, msg, null); } public ProtobufReceiveTimeoutTransportException(CodedInputStream in) throws IOException { diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java index a5317b0fcee62..a5d6fec24c4d3 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareClient.java @@ -15,7 +15,7 @@ import org.opensearch.action.ProtobufActionResponse; import org.opensearch.client.ProtobufClient; import org.opensearch.client.support.ProtobufAbstractClient; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; @@ -30,12 +30,7 @@ final class ProtobufRemoteClusterAwareClient extends ProtobufAbstractClient { private final String clusterAlias; private final RemoteClusterService remoteClusterService; - ProtobufRemoteClusterAwareClient( - Settings settings, - ThreadPool threadPool, - ProtobufTransportService service, - 
String clusterAlias - ) { + ProtobufRemoteClusterAwareClient(Settings settings, ThreadPool threadPool, ProtobufTransportService service, String clusterAlias) { super(settings, threadPool); this.service = service; this.clusterAlias = clusterAlias; @@ -50,8 +45,8 @@ protected { Transport.ProtobufConnection connection; - if (request instanceof ProtobufRemoteClusterAwareRequest) { - ProtobufDiscoveryNode preferredTargetNode = ((ProtobufRemoteClusterAwareRequest) request).getPreferredTargetNode(); + if (request instanceof RemoteClusterAwareRequest) { + DiscoveryNode preferredTargetNode = ((RemoteClusterAwareRequest) request).getPreferredTargetNode(); connection = remoteClusterService.getConnectionProtobuf(preferredTargetNode, clusterAlias); } else { connection = remoteClusterService.getConnectionProtobuf(clusterAlias); diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java deleted file mode 100644 index 80576bd9608bf..0000000000000 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterAwareRequest.java +++ /dev/null @@ -1,29 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. -*/ - -package org.opensearch.transport; - -import org.opensearch.cluster.node.ProtobufDiscoveryNode; - -/** - * Request for remote clusters -* -* @opensearch.internal -*/ -public interface ProtobufRemoteClusterAwareRequest { - - /** - * Returns the preferred discovery node for this request. The remote cluster client will attempt to send - * this request directly to this node. Otherwise, it will send the request as a proxy action that will - * be routed by the remote cluster to this node. - * - * @return preferred discovery node - */ - ProtobufDiscoveryNode getPreferredTargetNode(); - -} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java index ef896f62e856e..71fc6c9154b73 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterConnection.java @@ -14,7 +14,7 @@ import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; import org.opensearch.action.support.ContextPreservingActionListener; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.ProtobufDiscoveryNodes; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -106,7 +106,7 @@ void ensureConnected(ActionListener listener) { } /** - * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link ProtobufDiscoveryNode} lookup function + * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function * that returns null if the node ID is not found. 
* * The requests to get cluster state on the connected cluster are made in the system context because logically @@ -114,10 +114,10 @@ void ensureConnected(ActionListener listener) { * user who made the request that is using this method in its implementation is authorized to view the entire * cluster state. */ - void collectNodes(ActionListener> listener) { + void collectNodes(ActionListener> listener) { Runnable runnable = () -> { final ThreadContext threadContext = threadPool.getThreadContext(); - final ContextPreservingActionListener> contextPreservingActionListener = + final ContextPreservingActionListener> contextPreservingActionListener = new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we stash any context here since this is an internal execution and should not leak any existing context information @@ -172,10 +172,10 @@ public String executor() { } /** - * Returns a connection to the remote cluster, preferably a direct connection to the provided {@link ProtobufDiscoveryNode}. + * Returns a connection to the remote cluster, preferably a direct connection to the provided {@link DiscoveryNode}. * If such node is not connected, the returned connection will be a proxy connection that redirects to it. */ - Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode remoteClusterNode) { + Transport.ProtobufConnection getConnection(DiscoveryNode remoteClusterNode) { return remoteConnectionManager.getConnection(remoteClusterNode); } @@ -197,7 +197,7 @@ boolean assertNoRunningConnections() { return connectionStrategy.assertNoRunningConnections(); } - boolean isNodeConnected(final ProtobufDiscoveryNode node) { + boolean isNodeConnected(final DiscoveryNode node) { return remoteConnectionManager.nodeConnected(node); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java index eef008cbb398e..bbcd06df4a1f7 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteClusterService.java @@ -16,7 +16,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.ProtobufClient; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; @@ -130,7 +130,7 @@ public boolean isEnabled() { ProtobufRemoteClusterService(Settings settings, ProtobufTransportService transportService) { super(settings); - this.enabled = ProtobufDiscoveryNode.isRemoteClusterClient(settings); + this.enabled = DiscoveryNode.isRemoteClusterClient(settings); this.transportService = transportService; } @@ -141,7 +141,7 @@ public boolean isCrossClusterSearchEnabled() { return remoteClusters.isEmpty() == false; } - boolean isRemoteNodeConnected(final String remoteCluster, final ProtobufDiscoveryNode node) { + boolean isRemoteNodeConnected(final String remoteCluster, final DiscoveryNode node) { return remoteClusters.get(remoteCluster).isNodeConnected(node); } @@ -185,7 +185,7 @@ public Set getRegisteredRemoteClusterNames() { * * @throws IllegalArgumentException if the remote cluster is unknown */ - public 
Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node, String cluster) {
+    public Transport.ProtobufConnection getConnection(DiscoveryNode node, String cluster) {
         return getRemoteClusterConnection(cluster).getConnection(node);
     }
 
@@ -346,10 +346,10 @@ public Stream<ProtobufRemoteConnectionInfo> getRemoteConnectionInfos() {
     }
 
     /**
-     * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link ProtobufDiscoveryNode}
+     * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link DiscoveryNode}
      * function on success.
      */
-    public void collectNodes(Set<String> clusters, ActionListener<BiFunction<String, String, ProtobufDiscoveryNode>> listener) {
+    public void collectNodes(Set<String> clusters, ActionListener<BiFunction<String, String, DiscoveryNode>> listener) {
         if (enabled == false) {
             throw new IllegalArgumentException(
                 "this node does not have the " + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName() + " role"
@@ -363,14 +363,14 @@ public void collectNodes(Set<String> clusters, ActionListener<BiFunction<String
-        final Map<String, Function<String, ProtobufDiscoveryNode>> clusterMap = new HashMap<>();
+        final Map<String, Function<String, DiscoveryNode>> clusterMap = new HashMap<>();
         CountDown countDown = new CountDown(clusters.size());
-        Function<String, ProtobufDiscoveryNode> nullFunction = s -> null;
+        Function<String, DiscoveryNode> nullFunction = s -> null;
         for (final String cluster : clusters) {
             ProtobufRemoteClusterConnection connection = remoteClusters.get(cluster);
-            connection.collectNodes(new ActionListener<Function<String, ProtobufDiscoveryNode>>() {
+            connection.collectNodes(new ActionListener<Function<String, DiscoveryNode>>() {
                 @Override
-                public void onResponse(Function<String, ProtobufDiscoveryNode> nodeLookup) {
+                public void onResponse(Function<String, DiscoveryNode> nodeLookup) {
                     synchronized (clusterMap) {
                         clusterMap.put(cluster, nodeLookup);
                     }
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java
index 95c0e2a9624bd..f0c7a093d83f6 100644
--- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java
+++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionManager.java
@@ -10,7 +10,7 @@
 
 import org.opensearch.Version;
 import org.opensearch.action.ActionListener;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,19 +29,19 @@ public class ProtobufRemoteConnectionManager implements ProtobufConnectionManage
     private final String clusterAlias;
     private final ProtobufConnectionManager delegate;
     private final AtomicLong counter = new AtomicLong();
-    private volatile List<ProtobufDiscoveryNode> connectedNodes = Collections.emptyList();
+    private volatile List<DiscoveryNode> connectedNodes = Collections.emptyList();
 
     ProtobufRemoteConnectionManager(String clusterAlias, ProtobufConnectionManager delegate) {
         this.clusterAlias = clusterAlias;
         this.delegate = delegate;
         this.delegate.addListener(new ProtobufTransportConnectionListener() {
             @Override
-            public void onNodeConnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) {
+            public void onNodeConnected(DiscoveryNode node, Transport.ProtobufConnection connection) {
                 addConnectedNode(node);
             }
 
             @Override
-            public void onNodeDisconnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) {
+            public void onNodeDisconnected(DiscoveryNode node, Transport.ProtobufConnection connection) {
                 removeConnectedNode(node);
             }
         });
@@ -49,7 +49,7 @@ public void onNodeDisconnected(ProtobufDiscoveryNode node, Transport.ProtobufCon
 
     @Override
     public void connectToNode(
-        ProtobufDiscoveryNode node,
+        DiscoveryNode node,
         ProtobufConnectionProfile connectionProfile,
         ProtobufConnectionManager.ConnectionValidator connectionValidator,
         ActionListener<Void> listener
@@ -69,7 +69,7 @@ public void removeListener(ProtobufTransportConnectionListener listener) {
 
     @Override
     public void openConnection(
-        ProtobufDiscoveryNode node,
+        DiscoveryNode node,
         ProtobufConnectionProfile profile,
         ActionListener<Transport.ProtobufConnection> listener
     ) {
@@ -77,7 +77,7 @@ public void openConnection(
     }
 
     @Override
-    public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) {
+    public Transport.ProtobufConnection getConnection(DiscoveryNode node) {
         try {
             return delegate.getConnection(node);
         } catch (NodeNotConnectedException e) {
@@ -86,12 +86,12 @@ public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) {
 
     @Override
-    public boolean nodeConnected(ProtobufDiscoveryNode node) {
+    public boolean nodeConnected(DiscoveryNode node) {
         return delegate.nodeConnected(node);
     }
 
     @Override
-    public void disconnectFromNode(ProtobufDiscoveryNode node) {
+    public void disconnectFromNode(DiscoveryNode node) {
         delegate.disconnectFromNode(node);
     }
 
@@ -101,22 +101,20 @@ public ProtobufConnectionProfile getConnectionProfile() {
     }
 
     public Transport.ProtobufConnection getAnyRemoteConnection() {
-        List<ProtobufDiscoveryNode> localConnectedNodes = this.connectedNodes;
+        List<DiscoveryNode> localConnectedNodes = this.connectedNodes;
         long curr;
         while ((curr = counter.incrementAndGet()) == Long.MIN_VALUE)
             ;
         if (localConnectedNodes.isEmpty() == false) {
-            ProtobufDiscoveryNode nextNode = localConnectedNodes.get(
-                Math.toIntExact(Math.floorMod(curr, (long) localConnectedNodes.size()))
-            );
+            DiscoveryNode nextNode = localConnectedNodes.get(Math.toIntExact(Math.floorMod(curr, (long) localConnectedNodes.size())));
             try {
                 return delegate.getConnection(nextNode);
             } catch (NodeNotConnectedException e) {
                 // Ignore. We will manually create an iterator of open nodes
             }
         }
-        Set<ProtobufDiscoveryNode> allConnectionNodes = getAllConnectedNodes();
-        for (ProtobufDiscoveryNode connectedNode : allConnectionNodes) {
+        Set<DiscoveryNode> allConnectionNodes = getAllConnectedNodes();
+        for (DiscoveryNode connectedNode : allConnectionNodes) {
             try {
                 return delegate.getConnection(connectedNode);
             } catch (NodeNotConnectedException e) {
@@ -127,7 +125,7 @@ public Transport.ProtobufConnection getAnyRemoteConnection() {
 
     @Override
-    public Set<ProtobufDiscoveryNode> getAllConnectedNodes() {
+    public Set<DiscoveryNode> getAllConnectedNodes() {
         return delegate.getAllConnectedNodes();
     }
 
@@ -146,16 +144,16 @@ public void closeNoBlock() {
         delegate.closeNoBlock();
     }
 
-    private synchronized void addConnectedNode(ProtobufDiscoveryNode addedNode) {
-        ArrayList<ProtobufDiscoveryNode> newConnections = new ArrayList<>(this.connectedNodes);
+    private synchronized void addConnectedNode(DiscoveryNode addedNode) {
+        ArrayList<DiscoveryNode> newConnections = new ArrayList<>(this.connectedNodes);
         newConnections.add(addedNode);
         this.connectedNodes = Collections.unmodifiableList(newConnections);
     }
 
-    private synchronized void removeConnectedNode(ProtobufDiscoveryNode removedNode) {
+    private synchronized void removeConnectedNode(DiscoveryNode removedNode) {
         int newSize = this.connectedNodes.size() - 1;
-        ArrayList<ProtobufDiscoveryNode> newConnectedNodes = new ArrayList<>(newSize);
-        for (ProtobufDiscoveryNode connectedNode : this.connectedNodes) {
+        ArrayList<DiscoveryNode> newConnectedNodes = new ArrayList<>(newSize);
+        for (DiscoveryNode connectedNode : this.connectedNodes) {
             if (connectedNode.equals(removedNode) == false) {
                 newConnectedNodes.add(connectedNode);
             }
@@ -166,15 +164,15 @@ private synchronized void removeConnectedNode(ProtobufDiscoveryNode removedNode)
 
     static final class ProxyConnection implements Transport.ProtobufConnection {
         private final Transport.ProtobufConnection connection;
-        private final ProtobufDiscoveryNode targetNode;
+        private final DiscoveryNode targetNode;
 
-        private ProxyConnection(Transport.ProtobufConnection connection, ProtobufDiscoveryNode targetNode) {
+        private ProxyConnection(Transport.ProtobufConnection connection, DiscoveryNode targetNode) {
             this.connection = connection;
             this.targetNode = targetNode;
         }
 
         @Override
-        public ProtobufDiscoveryNode getNode() {
+        public DiscoveryNode getNode() {
             return targetNode;
         }
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java
index 53f3ad80acc4e..3b9b68a3b5be3 100644
--- a/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java
+++ b/server/src/main/java/org/opensearch/transport/ProtobufRemoteConnectionStrategy.java
@@ -38,7 +38,7 @@
 import org.apache.lucene.store.AlreadyClosedException;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ContextPreservingActionListener;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.Strings;
 import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.common.settings.Setting;
@@ -361,7 +361,7 @@ boolean shouldRebuildConnection(Settings newSettings) {
     protected abstract ProtobufConnectionStrategy strategyType();
 
     @Override
-    public void onNodeDisconnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) {
+    public void onNodeDisconnected(DiscoveryNode node, Transport.ProtobufConnection connection) {
         if (shouldOpenMoreConnections()) {
             // try to reconnect and fill up the slot of the disconnected node
             connect(
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java
index 8a2d15db70e11..f2c65666bd7b9 100644
--- a/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java
+++ b/server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java
@@ -74,7 +74,10 @@ public void processMessageReceived(Request request, ProtobufTransportChannel cha
             //     );
             // }
             final TcpChannel tcpChannel = ((ProtobufTcpTransportChannel) channel).getChannel();
-            final Releasable stopTracking = taskManager.startProtobufTrackingCancellableChannelTask(tcpChannel, (ProtobufCancellableTask) task);
+            final Releasable stopTracking = taskManager.startProtobufTrackingCancellableChannelTask(
+                tcpChannel,
+                (ProtobufCancellableTask) task
+            );
             unregisterTask = Releasables.wrap(unregisterTask, stopTracking);
         }
         final ProtobufTaskTransportChannel taskTransportChannel = new ProtobufTaskTransportChannel(channel, unregisterTask);
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java b/server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java
index eb1e1340230c4..0281f1c530a44 100644
--- a/server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java
+++ b/server/src/main/java/org/opensearch/transport/ProtobufSendRequestTransportException.java
@@ -10,7 +10,7 @@
 import com.google.protobuf.CodedInputStream;
 
 import org.opensearch.OpenSearchWrapperException;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 
 import java.io.IOException;
 
@@ -21,8 +21,8 @@
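
The ProtobufRemoteConnectionManager hunks above keep the connected-node list as an immutable snapshot that writers replace wholesale under a lock, so the lock-free reader in getAnyRemoteConnection can round-robin over one consistent view. A minimal, self-contained sketch of that pattern; the RoundRobin name and generic element type are illustrative stand-ins, not part of this patch:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

final class RoundRobin<T> {
    private final AtomicLong counter = new AtomicLong();
    // Readers see one immutable snapshot; writers swap in a fresh copy.
    private volatile List<T> items = Collections.emptyList();

    synchronized void add(T item) {
        List<T> copy = new ArrayList<>(this.items);
        copy.add(item);
        this.items = Collections.unmodifiableList(copy);
    }

    synchronized void remove(T item) {
        List<T> copy = new ArrayList<>(this.items);
        copy.remove(item);
        this.items = Collections.unmodifiableList(copy);
    }

    T next() {
        List<T> snapshot = this.items; // single volatile read
        if (snapshot.isEmpty()) {
            return null;
        }
        long curr = counter.incrementAndGet();
        // floorMod keeps the index non-negative even after the counter wraps
        return snapshot.get(Math.toIntExact(Math.floorMod(curr, (long) snapshot.size())));
    }
}

The copy-on-write trade-off fits this workload: connects and disconnects are rare, while connection lookups are constant, so paying for a list copy on each membership change keeps every read allocation-free and unsynchronized.
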
*/ public class ProtobufSendRequestTransportException extends ProtobufActionTransportException implements OpenSearchWrapperException { - public ProtobufSendRequestTransportException(ProtobufDiscoveryNode node, String action, Throwable cause) { - super(node == null ? null : node.getName(), node == null ? null : node.getAddress(), action, cause); + public ProtobufSendRequestTransportException(DiscoveryNode node, String action, Throwable cause) { + super(node == null ? null : node.getName(), node == null ? null : node.getProtobufAddress(), action, cause); } public ProtobufSendRequestTransportException(CodedInputStream in) throws IOException { diff --git a/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java index f503ff0951eda..edf73810a43c2 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufSniffConnectionStrategy.java @@ -17,8 +17,8 @@ import org.opensearch.action.admin.cluster.state.ClusterStateAction; import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; -import org.opensearch.cluster.ProtobufClusterName; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.Booleans; import org.opensearch.common.SetOnce; @@ -29,7 +29,7 @@ import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.util.io.IOUtils; @@ -118,14 +118,14 @@ public class ProtobufSniffConnectionStrategy extends ProtobufRemoteConnectionStr static final int CHANNELS_PER_CONNECTION = 6; - private static final Predicate DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion()) + private static final Predicate DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion()) && (node.isClusterManagerNode() == false || node.isDataNode() || node.isIngestNode()); private final List configuredSeedNodes; - private final List> seedNodes; + private final List> seedNodes; private final int maxNumRemoteConnections; - private final Predicate nodePredicate; - private final SetOnce remoteClusterName = new SetOnce<>(); + private final Predicate nodePredicate; + private final SetOnce remoteClusterName = new SetOnce<>(); private final String proxyAddress; ProtobufSniffConnectionStrategy( @@ -153,7 +153,7 @@ public class ProtobufSniffConnectionStrategy extends ProtobufRemoteConnectionStr String proxyAddress, Settings settings, int maxNumRemoteConnections, - Predicate nodePredicate, + Predicate nodePredicate, List configuredSeedNodes ) { this( @@ -166,7 +166,7 @@ public class ProtobufSniffConnectionStrategy extends ProtobufRemoteConnectionStr nodePredicate, configuredSeedNodes, configuredSeedNodes.stream() - .map(seedAddress -> (Supplier) () -> resolveSeedNode(clusterAlias, seedAddress, proxyAddress)) + .map(seedAddress -> (Supplier) () -> 
resolveSeedNode(clusterAlias, seedAddress, proxyAddress)) .collect(Collectors.toList()) ); } @@ -178,9 +178,9 @@ public class ProtobufSniffConnectionStrategy extends ProtobufRemoteConnectionStr String proxyAddress, Settings settings, int maxNumRemoteConnections, - Predicate nodePredicate, + Predicate nodePredicate, List configuredSeedNodes, - List> seedNodes + List> seedNodes ) { super(clusterAlias, transportService, connectionManager, settings); this.proxyAddress = proxyAddress; @@ -228,7 +228,7 @@ protected ProtobufRemoteConnectionInfo.ModeInfo getModeInfo() { return new SniffModeInfo(configuredSeedNodes, maxNumRemoteConnections, connectionManager.size()); } - private void collectRemoteNodes(Iterator> seedNodes, ActionListener listener) { + private void collectRemoteNodes(Iterator> seedNodes, ActionListener listener) { if (Thread.currentThread().isInterrupted()) { listener.onFailure(new InterruptedException("remote connect thread got interrupted")); return; @@ -254,7 +254,7 @@ private void collectRemoteNodes(Iterator> seedNo listener.onFailure(e); }; - final ProtobufDiscoveryNode seedNode = seedNodes.next().get(); + final DiscoveryNode seedNode = seedNodes.next().get(); logger.trace("[{}] opening transient connection to seed node: [{}]", clusterAlias, seedNode); final StepListener openConnectionStep = new StepListener<>(); try { @@ -276,7 +276,7 @@ private void collectRemoteNodes(Iterator> seedNo final StepListener fullConnectionStep = new StepListener<>(); handshakeStep.whenComplete(handshakeResponse -> { - final ProtobufDiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode(); + final DiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode(); if (nodePredicate.test(handshakeNode) && shouldOpenMoreConnections()) { logger.trace( @@ -285,7 +285,7 @@ private void collectRemoteNodes(Iterator> seedNo handshakeNode, proxyAddress ); - final ProtobufDiscoveryNode handshakeNodeWithProxy = maybeAddProxyAddress(proxyAddress, handshakeNode); + final DiscoveryNode handshakeNodeWithProxy = maybeAddProxyAddress(proxyAddress, handshakeNode); connectionManager.connectToNode( handshakeNodeWithProxy, null, @@ -297,7 +297,7 @@ private void collectRemoteNodes(Iterator> seedNo } }, e -> { final Transport.ProtobufConnection connection = openConnectionStep.result(); - final ProtobufDiscoveryNode node = connection.getNode(); + final DiscoveryNode node = connection.getNode(); logger.debug(() -> new ParameterizedMessage("[{}] failed to handshake with seed node: [{}]", clusterAlias, node), e); IOUtils.closeWhileHandlingException(connection); onFailure.accept(e); @@ -338,7 +338,7 @@ private void collectRemoteNodes(Iterator> seedNo } }, e -> { final Transport.ProtobufConnection connection = openConnectionStep.result(); - final ProtobufDiscoveryNode node = connection.getNode(); + final DiscoveryNode node = connection.getNode(); logger.debug( () -> new ParameterizedMessage("[{}] failed to open managed connection to seed node: [{}]", clusterAlias, node), e @@ -356,12 +356,12 @@ private class SniffClusterStateResponseHandler implements ProtobufTransportRespo private final Transport.ProtobufConnection connection; private final ActionListener listener; - private final Iterator> seedNodes; + private final Iterator> seedNodes; SniffClusterStateResponseHandler( Transport.ProtobufConnection connection, ActionListener listener, - Iterator> seedNodes + Iterator> seedNodes ) { this.connection = connection; this.listener = listener; @@ -378,12 +378,12 @@ public void handleResponse(ProtobufClusterStateResponse 
response) { handleNodes(response.getState().nodes().getNodes().valuesIt()); } - private void handleNodes(Iterator nodesIter) { + private void handleNodes(Iterator nodesIter) { while (nodesIter.hasNext()) { - final ProtobufDiscoveryNode node = nodesIter.next(); + final DiscoveryNode node = nodesIter.next(); if (nodePredicate.test(node) && shouldOpenMoreConnections()) { logger.trace("[{}] opening managed connection to node: [{}] proxy address: [{}]", clusterAlias, node, proxyAddress); - final ProtobufDiscoveryNode nodeWithProxy = maybeAddProxyAddress(proxyAddress, node); + final DiscoveryNode nodeWithProxy = maybeAddProxyAddress(proxyAddress, node); connectionManager.connectToNode( nodeWithProxy, null, @@ -451,10 +451,10 @@ public String executor() { } } - private Predicate getRemoteClusterNamePredicate() { - return new Predicate() { + private Predicate getRemoteClusterNamePredicate() { + return new Predicate() { @Override - public boolean test(ProtobufClusterName c) { + public boolean test(ClusterName c) { return remoteClusterName.get() == null || c.equals(remoteClusterName.get()); } @@ -467,18 +467,18 @@ public String toString() { }; } - private static ProtobufDiscoveryNode resolveSeedNode(String clusterAlias, String address, String proxyAddress) { + private static DiscoveryNode resolveSeedNode(String clusterAlias, String address, String proxyAddress) { if (proxyAddress == null || proxyAddress.isEmpty()) { - ProtobufTransportAddress transportAddress = new ProtobufTransportAddress(parseConfiguredAddress(address)); - return new ProtobufDiscoveryNode( + TransportAddress transportAddress = new TransportAddress(parseConfiguredAddress(address)); + return new DiscoveryNode( clusterAlias + "#" + transportAddress.toString(), transportAddress, Version.CURRENT.minimumCompatibilityVersion() ); } else { - ProtobufTransportAddress transportAddress = new ProtobufTransportAddress(parseConfiguredAddress(proxyAddress)); + TransportAddress transportAddress = new TransportAddress(parseConfiguredAddress(proxyAddress)); String hostName = ProtobufRemoteConnectionStrategy.parseHost(proxyAddress); - return new ProtobufDiscoveryNode( + return new DiscoveryNode( "", clusterAlias + "#" + address, UUIDs.randomBase64UUID(), @@ -493,7 +493,7 @@ private static ProtobufDiscoveryNode resolveSeedNode(String clusterAlias, String } // Default visibility for tests - static Predicate getNodePredicate(Settings settings) { + static Predicate getNodePredicate(Settings settings) { if (RemoteClusterService.REMOTE_NODE_ATTRIBUTE.exists(settings)) { // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for cross cluster search String attribute = RemoteClusterService.REMOTE_NODE_ATTRIBUTE.get(settings); @@ -502,19 +502,19 @@ static Predicate getNodePredicate(Settings settings) { return DEFAULT_NODE_PREDICATE; } - private static ProtobufDiscoveryNode maybeAddProxyAddress(String proxyAddress, ProtobufDiscoveryNode node) { + private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, DiscoveryNode node) { if (proxyAddress == null || proxyAddress.isEmpty()) { return node; } else { // resolve proxy address lazy here InetSocketAddress proxyInetAddress = parseConfiguredAddress(proxyAddress); - return new ProtobufDiscoveryNode( + return new DiscoveryNode( node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node.getHostAddress(), - new ProtobufTransportAddress(proxyInetAddress), + new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), 
node.getVersion() diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransport.java b/server/src/main/java/org/opensearch/transport/ProtobufTransport.java deleted file mode 100644 index 234583dd011a8..0000000000000 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransport.java +++ /dev/null @@ -1,281 +0,0 @@ -/* -* SPDX-License-Identifier: Apache-2.0 -* -* The OpenSearch Contributors require contributions made to -* this file be licensed under the Apache-2.0 license or a -* compatible open source license. -*/ - -package org.opensearch.transport; - -import org.opensearch.Version; -import org.opensearch.action.ActionListener; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; -import org.opensearch.common.collect.MapBuilder; -import org.opensearch.common.component.LifecycleComponent; -import org.opensearch.common.transport.ProtobufBoundTransportAddress; -import org.opensearch.common.transport.ProtobufTransportAddress; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.ConcurrentMapLong; - -import java.io.Closeable; -import java.io.IOException; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Predicate; - -/** - * OpenSearch ProtobufTransport Interface -* -* @opensearch.internal -*/ -public interface ProtobufTransport extends LifecycleComponent { - - /** - * Registers a new request handler - */ - default void registerRequestHandler(ProtobufRequestHandlerRegistry reg) { - getRequestHandlers().registerHandler(reg); - } - - void setMessageListener(ProtobufTransportMessageListener listener); - - default void setSlowLogThreshold(TimeValue slowLogThreshold) {} - - default boolean isSecure() { - return false; - } - - /** - * The address the transport is bound on. - */ - ProtobufBoundTransportAddress boundAddress(); - - /** - * Further profile bound addresses - * @return null iff profiles are unsupported, otherwise a map with name of profile and its bound transport address - */ - Map profileBoundAddresses(); - - /** - * Returns an address from its string representation. - */ - ProtobufTransportAddress[] addressesFromString(String address) throws UnknownHostException; - - /** - * Returns a list of all local addresses for this transport - */ - List getDefaultSeedAddresses(); - - /** - * Opens a new connection to the given node. When the connection is fully connected, the listener is called. - * The ActionListener will be called on the calling thread or the generic thread pool. 
- */ - void openConnection( - ProtobufDiscoveryNode node, - ProtobufConnectionProfile profile, - ActionListener listener - ); - - ProtobufTransportStats getStats(); - - ResponseHandlers getResponseHandlers(); - - RequestHandlers getRequestHandlers(); - - /** - * A unidirectional connection to a {@link ProtobufDiscoveryNode} - */ - interface Connection extends Closeable { - /** - * The node this connection is associated with - */ - ProtobufDiscoveryNode getNode(); - - /** - * Sends the request to the node this connection is associated with - * @param requestId see {@link ResponseHandlers#add(ResponseContext)} for details - * @param action the action to execute - * @param request the request to send - * @param options request options to apply - * @throws NodeNotConnectedException if the given node is not connected - */ - void sendRequest(long requestId, String action, ProtobufTransportRequest request, TransportRequestOptions options) - throws IOException, TransportException; - - /** - * The listener's {@link ActionListener#onResponse(Object)} method will be called when this - * connection is closed. No implementations currently throw an exception during close, so - * {@link ActionListener#onFailure(Exception)} will not be called. - * - * @param listener to be called - */ - void addCloseListener(ActionListener listener); - - boolean isClosed(); - - /** - * Returns the version of the node this connection was established with. - */ - default Version getVersion() { - return getNode().getVersion(); - } - - /** - * Returns a key that this connection can be cached on. Delegating subclasses must delegate method call to - * the original connection. - */ - default Object getCacheKey() { - return this; - } - - @Override - void close(); - } - - /** - * This class represents a response context that encapsulates the actual response handler, the action and the connection it was - * executed on. - */ - final class ResponseContext { - - private final ProtobufTransportResponseHandler handler; - - private final Connection connection; - - private final String action; - - ResponseContext(ProtobufTransportResponseHandler handler, Connection connection, String action) { - this.handler = handler; - this.connection = connection; - this.action = action; - } - - public ProtobufTransportResponseHandler handler() { - return handler; - } - - public Connection connection() { - return this.connection; - } - - public String action() { - return this.action; - } - } - - /** - * This class is a registry that allows - */ - final class ResponseHandlers { - private final ConcurrentMapLong> handlers = ConcurrentCollections - .newConcurrentMapLongWithAggressiveConcurrency(); - private final AtomicLong requestIdGenerator = new AtomicLong(); - - /** - * Returns true if the give request ID has a context associated with it. - */ - public boolean contains(long requestId) { - return handlers.containsKey(requestId); - } - - /** - * Removes and return the {@link ResponseContext} for the given request ID or returns - * null if no context is associated with this request ID. - */ - public ResponseContext remove(long requestId) { - return handlers.remove(requestId); - } - - /** - * Adds a new response context and associates it with a new request ID. 
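
The ResponseHandlers registry in the file being deleted here pairs each outbound request with a fresh id from an AtomicLong, and the arriving response removes and returns the context registered under that id. A stripped-down sketch of the same bookkeeping; ResponseRegistry and its generic context type C are illustrative stand-ins, not part of this patch:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Predicate;

final class ResponseRegistry<C> {
    private final ConcurrentHashMap<Long, C> handlers = new ConcurrentHashMap<>();
    private final AtomicLong requestIdGenerator = new AtomicLong();

    // Associate a context with a fresh request id and return the id.
    long add(C context) {
        long requestId = requestIdGenerator.incrementAndGet();
        C existing = handlers.put(requestId, context);
        assert existing == null : "request ID already in use: " + requestId;
        return requestId;
    }

    // A response (or failure) consumes its context exactly once.
    C remove(long requestId) {
        return handlers.remove(requestId);
    }

    // Drain every context matching the predicate, e.g. when a channel closes.
    List<C> prune(Predicate<C> predicate) {
        List<C> pruned = new ArrayList<>();
        for (Map.Entry<Long, C> entry : handlers.entrySet()) {
            if (predicate.test(entry.getValue()) && handlers.remove(entry.getKey()) != null) {
                pruned.add(entry.getValue());
            }
        }
        return pruned;
    }
}
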
- * @return the new request ID - * @see Connection#sendRequest(long, String, ProtobufTransportRequest, TransportRequestOptions) - */ - public long add(ResponseContext holder) { - long requestId = newRequestId(); - ResponseContext existing = handlers.put(requestId, holder); - assert existing == null : "request ID already in use: " + requestId; - return requestId; - } - - /** - * Returns a new request ID to use when sending a message via {@link Connection#sendRequest(long, String, - * ProtobufTransportRequest, TransportRequestOptions)} - */ - long newRequestId() { - return requestIdGenerator.incrementAndGet(); - } - - /** - * Removes and returns all {@link ResponseContext} instances that match the predicate - */ - public List> prune( - Predicate> predicate - ) { - final List> holders = new ArrayList<>(); - for (Map.Entry> entry : handlers.entrySet()) { - ResponseContext holder = entry.getValue(); - if (predicate.test(holder)) { - ResponseContext remove = handlers.remove(entry.getKey()); - if (remove != null) { - holders.add(holder); - } - } - } - return holders; - } - - /** - * called by the {@link ProtobufTransport} implementation when a response or an exception has been received for a previously - * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not - * found. - */ - public ProtobufTransportResponseHandler onResponseReceived( - final long requestId, - final ProtobufTransportMessageListener listener - ) { - ResponseContext context = handlers.remove(requestId); - // listener.onResponseReceived(requestId, context); - if (context == null) { - return null; - } else { - return context.handler(); - } - } - } - - /** - * Request handler implementations - * - * @opensearch.internal - */ - final class RequestHandlers { - - private volatile Map> requestHandlers = Collections - .emptyMap(); - - synchronized void registerHandler(ProtobufRequestHandlerRegistry reg) { - if (requestHandlers.containsKey(reg.getAction())) { - throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); - } - requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); - } - - // TODO: Only visible for testing. 
Perhaps move StubbableTransport from - // org.opensearch.test.transport to org.opensearch.transport - public synchronized void forceRegister(ProtobufRequestHandlerRegistry reg) { - requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); - } - - @SuppressWarnings("unchecked") - public ProtobufRequestHandlerRegistry getHandler(String action) { - return (ProtobufRequestHandlerRegistry) requestHandlers.get(action); - } - } -} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java index 15b51edeabd0f..22e52438a8e99 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportActionProxy.java @@ -10,7 +10,7 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.ProtobufWriteable; import org.opensearch.tasks.ProtobufTask; import org.opensearch.threadpool.ThreadPool; @@ -53,7 +53,7 @@ private static class ProxyRequestHandler implements Prot @Override public void messageReceived(T request, ProtobufTransportChannel channel, ProtobufTask task) throws Exception { - ProtobufDiscoveryNode targetNode = request.targetNode; + DiscoveryNode targetNode = request.targetNode; ProtobufTransportRequest wrappedRequest = request.wrapped; service.sendRequest( targetNode, @@ -115,16 +115,16 @@ public String executor() { */ static class ProxyRequest extends ProtobufTransportRequest { final T wrapped; - final ProtobufDiscoveryNode targetNode; + final DiscoveryNode targetNode; - ProxyRequest(T wrapped, ProtobufDiscoveryNode targetNode) { + ProxyRequest(T wrapped, DiscoveryNode targetNode) { this.wrapped = wrapped; this.targetNode = targetNode; } ProxyRequest(CodedInputStream in, ProtobufWriteable.Reader reader) throws IOException { super(in); - targetNode = new ProtobufDiscoveryNode(in); + targetNode = new DiscoveryNode(in); wrapped = reader.read(in); } @@ -188,7 +188,7 @@ public static String getProxyAction(String action) { /** * Wraps the actual request in a proxy request object that encodes the target node. */ - public static ProtobufTransportRequest wrapRequest(ProtobufDiscoveryNode node, ProtobufTransportRequest request) { + public static ProtobufTransportRequest wrapRequest(DiscoveryNode node, ProtobufTransportRequest request) { return new ProxyRequest<>(request, node); } diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java index 30c66104145e8..228a5dfe5367e 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportConnectionListener.java @@ -8,7 +8,7 @@ package org.opensearch.transport; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; /** * A listener interface that allows to react on transport events. All methods may be @@ -34,10 +34,10 @@ default void onConnectionClosed(Transport.ProtobufConnection connection) {} /** * Called once a node connection is opened and registered. 
      */
-    default void onNodeConnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) {}
+    default void onNodeConnected(DiscoveryNode node, Transport.ProtobufConnection connection) {}
 
     /**
      * Called once a node connection is closed and unregistered.
      */
-    default void onNodeDisconnected(ProtobufDiscoveryNode node, Transport.ProtobufConnection connection) {}
+    default void onNodeDisconnected(DiscoveryNode node, Transport.ProtobufConnection connection) {}
 }
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java
index 633955d561fad..ff3dc3bb434ad 100644
--- a/server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportInfo.java
@@ -20,6 +20,7 @@
 import org.opensearch.common.network.InetAddresses;
 import org.opensearch.common.transport.ProtobufBoundTransportAddress;
 import org.opensearch.common.transport.ProtobufTransportAddress;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.node.ProtobufReportingService;
 
 import java.io.IOException;
@@ -135,4 +136,24 @@ public Map<String, ProtobufBoundTransportAddress> getProfileAddresses() {
     public Map<String, ProtobufBoundTransportAddress> profileAddresses() {
         return profileAddresses;
     }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(Fields.TRANSPORT);
+        builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
+        builder.field(Fields.PUBLISH_ADDRESS, formatPublishAddressString("transport.publish_address", address.publishAddress()));
+        builder.startObject(Fields.PROFILES);
+        if (profileAddresses != null && profileAddresses.size() > 0) {
+            for (Map.Entry<String, ProtobufBoundTransportAddress> entry : profileAddresses.entrySet()) {
+                builder.startObject(entry.getKey());
+                builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses());
+                String propertyName = "transport." + entry.getKey() + ".publish_address";
+                builder.field(Fields.PUBLISH_ADDRESS, formatPublishAddressString(propertyName, entry.getValue().publishAddress()));
+                builder.endObject();
+            }
+        }
+        builder.endObject();
+        builder.endObject();
+        return builder;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java
index bf109d0518693..e2624a1a16b20 100644
--- a/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportInterceptor.java
@@ -8,7 +8,7 @@
 
 package org.opensearch.transport;
 
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.io.stream.ProtobufWriteable.Reader;
 
 /**
@@ -35,8 +35,8 @@ default <T extends ProtobufTransportRequest> ProtobufTransportRequestHandler<T>
     /**
      * This is called up-front providing the actual low level {@link AsyncSender} that performs the low level send request.
      * The returned sender is used to send all requests that come in via
-     * {@link ProtobufTransportService#sendRequest(ProtobufDiscoveryNode, String, ProtobufTransportRequest, ProtobufTransportResponseHandler)} or
-     * {@link ProtobufTransportService#sendRequest(ProtobufDiscoveryNode, String, ProtobufTransportRequest, TransportRequestOptions, ProtobufTransportResponseHandler)}.
+ * {@link ProtobufTransportService#sendRequest(DiscoveryNode, String, ProtobufTransportRequest, ProtobufTransportResponseHandler)} or + * {@link ProtobufTransportService#sendRequest(DiscoveryNode, String, ProtobufTransportRequest, TransportRequestOptions, ProtobufTransportResponseHandler)}. * This allows plugins to perform actions on each send request including modifying the request context etc. */ default AsyncSender interceptSender(AsyncSender sender) { diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java index 5c58c2064801f..d0167c8c61570 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportMessageListener.java @@ -8,7 +8,7 @@ package org.opensearch.transport; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; /** * Listens for transport messages @@ -53,7 +53,7 @@ default void onResponseSent(long requestId, String action, Exception error) {} * @param finalOptions the request options */ default void onRequestSent( - ProtobufDiscoveryNode node, + DiscoveryNode node, long requestId, String action, ProtobufTransportRequest request, diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java index 4b1e934977c7e..3f708b5be1a03 100644 --- a/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java +++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportService.java @@ -15,11 +15,10 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ProtobufActionListenerResponseHandler; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.cluster.ProtobufClusterName; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.component.AbstractLifecycleComponent; @@ -32,7 +31,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.ProtobufBoundTransportAddress; -import org.opensearch.common.transport.ProtobufTransportAddress; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -83,10 +83,10 @@ public class ProtobufTransportService extends AbstractLifecycleComponent protected final Transport transport; protected final ProtobufConnectionManager connectionManager; protected final ThreadPool threadPool; - protected final ProtobufClusterName clusterName; + protected final ClusterName clusterName; protected final TaskManager taskManager; private final ProtobufTransportInterceptor.AsyncSender asyncSender; - private final Function localNodeFactory; + private final Function localNodeFactory; private final boolean remoteClusterClient; 
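
ProtobufTransportService's synchronous connectToNode variants, in the hunks that follow, block on a PlainActionFuture that the asynchronous connect path completes. A sketch of the same blocking-over-async idiom using only JDK types; CompletableFuture stands in for OpenSearch's PlainActionFuture here, so this is an illustration of the pattern rather than the class the patch uses:

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

final class Blocking {
    // Run an async operation that completes the future, then wait for it.
    static <T> T get(Consumer<CompletableFuture<T>> asyncOp) {
        CompletableFuture<T> fut = new CompletableFuture<>();
        asyncOp.accept(fut);
        return fut.join();
    }

    public static void main(String[] args) {
        // The async side calls fut.complete(...) once the connection is live.
        String result = get(fut -> fut.complete("connected"));
        System.out.println(result); // prints "connected"
    }
}
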
private final Transport.ProtobufResponseHandlers responseHandlers; private final ProtobufTransportInterceptor interceptor; @@ -115,10 +115,10 @@ protected boolean removeEldestEntry(Map.Entry eldest) { private final RemoteClusterService remoteClusterService; /** if set will call requests sent to this id to shortcut and executed locally */ - volatile ProtobufDiscoveryNode localNode = null; + volatile DiscoveryNode localNode = null; private final Transport.ProtobufConnection localNodeConnection = new Transport.ProtobufConnection() { @Override - public ProtobufDiscoveryNode getNode() { + public DiscoveryNode getNode() { return localNode; } @@ -151,7 +151,7 @@ public ProtobufTransportService( Transport transport, ThreadPool threadPool, ProtobufTransportInterceptor transportInterceptor, - Function localNodeFactory, + Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders ) { @@ -172,7 +172,7 @@ public ProtobufTransportService( Transport transport, ThreadPool threadPool, ProtobufTransportInterceptor transportInterceptor, - Function localNodeFactory, + Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders, ProtobufConnectionManager connectionManager @@ -182,14 +182,14 @@ public ProtobufTransportService( this.threadPool = threadPool; this.localNodeFactory = localNodeFactory; this.connectionManager = connectionManager; - this.clusterName = ProtobufClusterName.CLUSTER_NAME_SETTING.get(settings); + this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); setTracerLogInclude(TransportSettings.TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.get(settings)); tracerLog = Loggers.getLogger(logger, ".tracer"); taskManager = createTaskManager(settings, clusterSettings, threadPool, taskHeaders); this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); - this.remoteClusterClient = ProtobufDiscoveryNode.isRemoteClusterClient(settings); + this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); remoteClusterService = new RemoteClusterService(settings, this); responseHandlers = transport.getProtobufResponseHandlers(); if (clusterSettings != null) { @@ -214,7 +214,7 @@ public RemoteClusterService getRemoteClusterService() { return remoteClusterService; } - public ProtobufDiscoveryNode getLocalNode() { + public DiscoveryNode getLocalNode() { return localNode; } @@ -263,6 +263,7 @@ protected void doStart() { logger.info("profile [{}]: {}", entry.getKey(), entry.getValue()); } } + localNode = localNodeFactory.apply(transport.boundProtobufAddress()); if (remoteClusterClient) { @@ -271,6 +272,10 @@ protected void doStart() { } } + public void setLocalNode(DiscoveryNode localNode) { + this.localNode = localNode; + } + @Override protected void doStop() { try { @@ -363,7 +368,7 @@ public List getDefaultSeedAddresses() { /** * Returns true iff the given node is already connected. 
*/ - public boolean nodeConnected(ProtobufDiscoveryNode node) { + public boolean nodeConnected(DiscoveryNode node) { return isLocalNode(node) || connectionManager.nodeConnected(node); } @@ -372,12 +377,12 @@ public boolean nodeConnected(ProtobufDiscoveryNode node) { * * @param node the node to connect to */ - public void connectToNode(ProtobufDiscoveryNode node) throws ProtobufConnectTransportException { + public void connectToNode(DiscoveryNode node) throws ProtobufConnectTransportException { connectToNode(node, (ProtobufConnectionProfile) null); } // We are skipping node validation for extensibility as extensionNode and opensearchNode(LocalNode) will have different ephemeral id's - public void connectToExtensionNode(final ProtobufDiscoveryNode node) { + public void connectToExtensionNode(final DiscoveryNode node) { PlainActionFuture.get(fut -> connectToExtensionNode(node, (ProtobufConnectionProfile) null, ActionListener.map(fut, x -> null))); } @@ -387,11 +392,11 @@ public void connectToExtensionNode(final ProtobufDiscoveryNode node) { * @param node the node to connect to * @param connectionProfile the connection profile to use when connecting to this node */ - public void connectToNode(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { + public void connectToNode(final DiscoveryNode node, ProtobufConnectionProfile connectionProfile) { PlainActionFuture.get(fut -> connectToNode(node, connectionProfile, ActionListener.map(fut, x -> null))); } - public void connectToExtensionNode(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { + public void connectToExtensionNode(final DiscoveryNode node, ProtobufConnectionProfile connectionProfile) { PlainActionFuture.get(fut -> connectToExtensionNode(node, connectionProfile, ActionListener.map(fut, x -> null))); } @@ -402,11 +407,11 @@ public void connectToExtensionNode(final ProtobufDiscoveryNode node, ProtobufCon * @param node the node to connect to * @param listener the action listener to notify */ - public void connectToNode(ProtobufDiscoveryNode node, ActionListener listener) throws ProtobufConnectTransportException { + public void connectToNode(DiscoveryNode node, ActionListener listener) throws ProtobufConnectTransportException { connectToNode(node, null, listener); } - public void connectToExtensionNode(ProtobufDiscoveryNode node, ActionListener listener) throws ProtobufConnectTransportException { + public void connectToExtensionNode(DiscoveryNode node, ActionListener listener) throws ProtobufConnectTransportException { connectToExtensionNode(node, null, listener); } @@ -418,11 +423,7 @@ public void connectToExtensionNode(ProtobufDiscoveryNode node, ActionListener listener - ) { + public void connectToNode(final DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ActionListener listener) { if (isLocalNode(node)) { listener.onResponse(null); return; @@ -431,7 +432,7 @@ public void connectToNode( } public void connectToExtensionNode( - final ProtobufDiscoveryNode node, + final DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ActionListener listener ) { @@ -442,11 +443,11 @@ public void connectToExtensionNode( connectionManager.connectToNode(node, connectionProfile, extensionConnectionValidator(node), listener); } - public ProtobufConnectionManager.ConnectionValidator connectionValidator(ProtobufDiscoveryNode node) { + public ProtobufConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode node) { return (newConnection, actualProfile, listener) 
-> { // We don't validate cluster names to allow for CCS connections. handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { - final ProtobufDiscoveryNode remote = resp.discoveryNode; + final DiscoveryNode remote = resp.discoveryNode; if (node.equals(remote) == false) { throw new ProtobufConnectTransportException(node, "handshake failed. unexpected remote node " + remote); @@ -457,11 +458,11 @@ public ProtobufConnectionManager.ConnectionValidator connectionValidator(Protobu }; } - public ProtobufConnectionManager.ConnectionValidator extensionConnectionValidator(ProtobufDiscoveryNode node) { + public ProtobufConnectionManager.ConnectionValidator extensionConnectionValidator(DiscoveryNode node) { return (newConnection, actualProfile, listener) -> { // We don't validate cluster names to allow for CCS connections. handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { - final ProtobufDiscoveryNode remote = resp.discoveryNode; + final DiscoveryNode remote = resp.discoveryNode; logger.info("Connection validation was skipped"); return null; })); @@ -475,7 +476,7 @@ public ProtobufConnectionManager.ConnectionValidator extensionConnectionValidato * @param node the node to connect to * @param connectionProfile the connection profile to use */ - public Transport.ProtobufConnection openConnection(final ProtobufDiscoveryNode node, ProtobufConnectionProfile connectionProfile) { + public Transport.ProtobufConnection openConnection(final DiscoveryNode node, ProtobufConnectionProfile connectionProfile) { return PlainActionFuture.get(fut -> openConnection(node, connectionProfile, fut)); } @@ -488,7 +489,7 @@ public Transport.ProtobufConnection openConnection(final ProtobufDiscoveryNode n * @param listener the action listener to notify */ public void openConnection( - final ProtobufDiscoveryNode node, + final DiscoveryNode node, ProtobufConnectionProfile connectionProfile, ActionListener listener ) { @@ -515,7 +516,7 @@ public void openConnection( public void handshake( final Transport.ProtobufConnection connection, final long handshakeTimeout, - final ActionListener listener + final ActionListener listener ) { handshake( connection, @@ -541,10 +542,10 @@ public void handshake( public void handshake( final Transport.ProtobufConnection connection, final long handshakeTimeout, - Predicate clusterNamePredicate, + Predicate clusterNamePredicate, final ActionListener listener ) { - final ProtobufDiscoveryNode node = connection.getNode(); + final DiscoveryNode node = connection.getNode(); sendRequest( connection, HANDSHAKE_ACTION_NAME, @@ -616,11 +617,11 @@ private HandshakeRequest() {} * @opensearch.internal */ public static class HandshakeResponse extends ProtobufTransportResponse { - private final ProtobufDiscoveryNode discoveryNode; - private final ProtobufClusterName clusterName; + private final DiscoveryNode discoveryNode; + private final ClusterName clusterName; private final Version version; - public HandshakeResponse(ProtobufDiscoveryNode discoveryNode, ProtobufClusterName clusterName, Version version) { + public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) { this.discoveryNode = discoveryNode; this.version = version; this.clusterName = clusterName; @@ -629,8 +630,8 @@ public HandshakeResponse(ProtobufDiscoveryNode discoveryNode, ProtobufClusterNam public HandshakeResponse(CodedInputStream in) throws IOException { super(in); 
ProtobufStreamInput protobufStreamInput = new ProtobufStreamInput(in); - discoveryNode = protobufStreamInput.readOptionalWriteable(ProtobufDiscoveryNode::new); - clusterName = new ProtobufClusterName(in); + discoveryNode = protobufStreamInput.readOptionalWriteable(DiscoveryNode::new); + clusterName = new ClusterName(in); version = Version.readVersionProtobuf(in); } @@ -642,16 +643,16 @@ public void writeTo(CodedOutputStream out) throws IOException { out.writeInt32NoTag(version.id); } - public ProtobufDiscoveryNode getDiscoveryNode() { + public DiscoveryNode getDiscoveryNode() { return discoveryNode; } - public ProtobufClusterName getClusterName() { + public ClusterName getClusterName() { return clusterName; } } - public void disconnectFromNode(ProtobufDiscoveryNode node) { + public void disconnectFromNode(DiscoveryNode node) { if (isLocalNode(node)) { return; } @@ -675,7 +676,7 @@ public void removeConnectionListener(ProtobufTransportConnectionListener listene } public TransportFuture submitRequest( - ProtobufDiscoveryNode node, + DiscoveryNode node, String action, ProtobufTransportRequest request, ProtobufTransportResponseHandler handler @@ -684,7 +685,7 @@ public TransportFuture submitRequest( } public TransportFuture submitRequest( - ProtobufDiscoveryNode node, + DiscoveryNode node, String action, ProtobufTransportRequest request, TransportRequestOptions options, @@ -702,7 +703,7 @@ public TransportFuture submitRequest( } public void sendRequest( - final ProtobufDiscoveryNode node, + final DiscoveryNode node, final String action, final ProtobufTransportRequest request, final ProtobufTransportResponseHandler handler @@ -719,7 +720,7 @@ public void sendRequest( } public final void sendRequest( - final ProtobufDiscoveryNode node, + final DiscoveryNode node, final String action, final ProtobufTransportRequest request, final TransportRequestOptions options, @@ -758,7 +759,10 @@ public final void sendRequest( final ProtobufTransportResponseHandler delegate; if (request.getParentTask().isSet()) { // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. - final Releasable unregisterChildNode = taskManager.registerProtobufChildNode(request.getParentTask().getId(), connection.getNode()); + final Releasable unregisterChildNode = taskManager.registerProtobufChildNode( + request.getParentTask().getId(), + connection.getNode() + ); delegate = new ProtobufTransportResponseHandler() { @Override public void handleResponse(T response) { @@ -807,7 +811,7 @@ public String toString() { * Returns either a real transport connection or a local node connection if we are using the local node optimization. 
* @throws ProtobufNodeNotConnectedException if the given node is not connected */ - public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) { + public Transport.ProtobufConnection getConnection(DiscoveryNode node) { if (isLocalNode(node)) { return localNodeConnection; } else { @@ -816,7 +820,7 @@ public Transport.ProtobufConnection getConnection(ProtobufDiscoveryNode node) { } public final void sendChildRequest( - final ProtobufDiscoveryNode node, + final DiscoveryNode node, final String action, final ProtobufTransportRequest request, final ProtobufTask parentTask, @@ -866,7 +870,7 @@ private void sendRequestInternal( if (connection == null) { throw new IllegalStateException("can't send request to a null connection"); } - ProtobufDiscoveryNode node = connection.getNode(); + DiscoveryNode node = connection.getNode(); Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); ContextRestoreResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); @@ -1024,8 +1028,8 @@ public static boolean shouldTraceAction(String action, String[] include, String[ return true; } - public ProtobufTransportAddress[] addressesFromString(String address) throws UnknownHostException { - return transport.addressesFromStringProtobuf(address); + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { + return transport.addressesFromString(address); } /** @@ -1149,7 +1153,7 @@ public void onRequestReceived(long requestId, String action) { /** called by the {@link Transport} implementation once a request has been sent */ @Override public void onRequestSent( - ProtobufDiscoveryNode node, + DiscoveryNode node, long requestId, String action, ProtobufTransportRequest request, @@ -1195,7 +1199,7 @@ public ProtobufRequestHandlerRegistry getReq private void checkForTimeout(long requestId) { // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished - final ProtobufDiscoveryNode sourceNode; + final DiscoveryNode sourceNode; final String action; assert responseHandlers.contains(requestId) == false; TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); @@ -1261,10 +1265,10 @@ final class TimeoutHandler implements Runnable { private final long requestId; private final long sentTime = threadPool.relativeTimeInMillis(); private final String action; - private final ProtobufDiscoveryNode node; + private final DiscoveryNode node; volatile Scheduler.Cancellable cancellable; - TimeoutHandler(long requestId, ProtobufDiscoveryNode node, String action) { + TimeoutHandler(long requestId, DiscoveryNode node, String action) { this.requestId = requestId; this.node = node; this.action = action; @@ -1325,19 +1329,19 @@ private void scheduleTimeout(TimeValue timeout) { */ static class TimeoutInfoHolder { - private final ProtobufDiscoveryNode node; + private final DiscoveryNode node; private final String action; private final long sentTime; private final long timeoutTime; - TimeoutInfoHolder(ProtobufDiscoveryNode node, String action, long sentTime, long timeoutTime) { + TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { this.node = node; this.action = action; this.sentTime = sentTime; this.timeoutTime = timeoutTime; } - public ProtobufDiscoveryNode node() { + public DiscoveryNode node() { return node; } @@ -1421,14 +1425,14 @@ void setTimeoutHandler(TimeoutHandler handler) { * @opensearch.internal */ 
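
The TimeoutHandler and TimeoutInfoHolder above move a timed-out request's metadata out of the live-handler map and into a timeout map, so a response that straggles in afterwards can still be attributed when checkForTimeout runs. A compact sketch of that hand-off, using String node ids in place of DiscoveryNode; TimeoutTracker and its method names are illustrative, not part of this patch:

import java.util.concurrent.ConcurrentHashMap;

final class TimeoutTracker {

    static final class TimeoutInfo {
        final String nodeId;
        final String action;
        final long sentTimeMillis;
        final long timeoutTimeMillis;

        TimeoutInfo(String nodeId, String action, long sentTimeMillis, long timeoutTimeMillis) {
            this.nodeId = nodeId;
            this.action = action;
            this.sentTimeMillis = sentTimeMillis;
            this.timeoutTimeMillis = timeoutTimeMillis;
        }
    }

    private final ConcurrentHashMap<Long, TimeoutInfo> timedOut = new ConcurrentHashMap<>();

    // When a request's deadline fires, remember what timed out and when.
    void onTimeout(long requestId, String nodeId, String action, long sentTimeMillis) {
        timedOut.put(requestId, new TimeoutInfo(nodeId, action, sentTimeMillis, System.currentTimeMillis()));
    }

    // If a response arrives for an id with no live handler, explain why.
    String explainLateResponse(long requestId) {
        TimeoutInfo info = timedOut.remove(requestId);
        if (info == null) {
            return "no timeout recorded for request [" + requestId + "]";
        }
        long lateByMillis = System.currentTimeMillis() - info.timeoutTimeMillis;
        return "response for [" + info.action + "] from node [" + info.nodeId + "] arrived " + lateByMillis + "ms after timing out";
    }
}
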
static class DirectResponseChannel implements ProtobufTransportChannel { - final ProtobufDiscoveryNode localNode; + final DiscoveryNode localNode; private final String action; private final long requestId; final ProtobufTransportService service; final ThreadPool threadPool; DirectResponseChannel( - ProtobufDiscoveryNode localNode, + DiscoveryNode localNode, String action, long requestId, ProtobufTransportService service, @@ -1510,7 +1514,7 @@ protected ProtobufRemoteTransportException wrapInRemote(Exception e) { if (e instanceof ProtobufRemoteTransportException) { return (ProtobufRemoteTransportException) e; } - return new ProtobufRemoteTransportException(localNode.getName(), localNode.getAddress(), action, e); + return new ProtobufRemoteTransportException(localNode.getName(), localNode.getProtobufAddress(), action, e); } protected void processException(final ProtobufTransportResponseHandler handler, final ProtobufRemoteTransportException rtx) { @@ -1542,7 +1546,7 @@ public ThreadPool getThreadPool() { return threadPool; } - private boolean isLocalNode(ProtobufDiscoveryNode discoveryNode) { + private boolean isLocalNode(DiscoveryNode discoveryNode) { return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode); } @@ -1573,7 +1577,7 @@ public void onResponseSent(long requestId, String action, Exception error) { @Override public void onRequestSent( - ProtobufDiscoveryNode node, + DiscoveryNode node, long requestId, String action, ProtobufTransportRequest request, diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java index ae4a98b04fdef..5ed96e9d85943 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java @@ -42,7 +42,7 @@ import org.opensearch.client.ProtobufClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -164,7 +164,7 @@ public boolean isEnabled() { RemoteClusterService(Settings settings, ProtobufTransportService transportService) { super(settings); - this.enabled = ProtobufDiscoveryNode.isRemoteClusterClient(settings); + this.enabled = DiscoveryNode.isRemoteClusterClient(settings); this.protobufTransportService = transportService; this.transportService = null; } @@ -236,7 +236,7 @@ public Transport.Connection getConnection(DiscoveryNode node, String cluster) { * * @throws IllegalArgumentException if the remote cluster is unknown */ - public Transport.ProtobufConnection getConnectionProtobuf(ProtobufDiscoveryNode node, String cluster) { + public Transport.ProtobufConnection getConnectionProtobuf(DiscoveryNode node, String cluster) { return getRemoteClusterConnectionProtobuf(cluster).getConnection(node); } diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index a72ba2b78fef6..07fd66367b8c5 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -39,7 +39,7 @@ import org.opensearch.action.ActionListener; import 
org.opensearch.action.support.ThreadedActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Booleans; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.bytes.BytesArray; @@ -138,6 +138,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final CircuitBreakerService circuitBreakerService; private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); + private final ConcurrentMap profileProtobufBoundAddresses = newConcurrentMap(); private final Map> serverChannels = newConcurrentMap(); private final Set acceptedChannels = ConcurrentCollections.newConcurrentSet(); @@ -145,6 +146,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements // connections while no connect operations is going on private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); private volatile BoundTransportAddress boundAddress; + private volatile ProtobufBoundTransportAddress protobufBoundAddress; private final TransportHandshaker handshaker; private final TransportKeepAlive keepAlive; @@ -398,6 +400,16 @@ public Map profileBoundAddresses() { return unmodifiableMap(new HashMap<>(profileBoundAddresses)); } + @Override + public ProtobufBoundTransportAddress boundProtobufAddress() { + return this.protobufBoundAddress; + } + + @Override + public Map profileProtobufBoundAddresses() { + return unmodifiableMap(new HashMap<>(profileProtobufBoundAddresses)); + } + @Override public List getDefaultSeedAddresses() { List local = new ArrayList<>(); @@ -436,11 +448,14 @@ protected void bindServer(ProfileSettings profileSettings) { } final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(profileSettings, boundAddresses); + final ProtobufBoundTransportAddress protobufBoundTransportAddress = createProtobufBoundTransportAddress(boundTransportAddress); if (profileSettings.isDefaultProfile) { this.boundAddress = boundTransportAddress; + this.protobufBoundAddress = protobufBoundTransportAddress; } else { profileBoundAddresses.put(profileSettings.profileName, boundTransportAddress); + profileProtobufBoundAddresses.put(profileSettings.profileName, protobufBoundTransportAddress); } } @@ -510,6 +525,43 @@ private BoundTransportAddress createBoundTransportAddress(ProfileSettings profil return new BoundTransportAddress(transportBoundAddresses, publishAddress); } + private ProtobufBoundTransportAddress createProtobufBoundTransportAddress(BoundTransportAddress boundTransportAddress) { + TransportAddress[] transportBoundAddresses = boundTransportAddress.boundAddresses(); + TransportAddress publishAddress = boundTransportAddress.publishAddress(); + ProtobufTransportAddress[] transportBoundAddressesProtobuf = new ProtobufTransportAddress[transportBoundAddresses.length]; + for (int i = 0; i < transportBoundAddresses.length; i++) { + transportBoundAddressesProtobuf[i] = new ProtobufTransportAddress(transportBoundAddresses[i].address()); + } + return new ProtobufBoundTransportAddress(transportBoundAddressesProtobuf, new ProtobufTransportAddress(publishAddress.address())); + // String[] boundAddressesHostStrings = new String[boundAddresses.size()]; + // ProtobufTransportAddress[] transportBoundAddresses = new ProtobufTransportAddress[boundAddresses.size()]; + // for (int i = 0; i < boundAddresses.size(); i++) { + // InetSocketAddress boundAddress = 
boundAddresses.get(i); + // boundAddressesHostStrings[i] = boundAddress.getHostString(); + // transportBoundAddresses[i] = new ProtobufTransportAddress(boundAddress); + // } + + // List publishHosts = profileSettings.publishHosts; + // if (profileSettings.isDefaultProfile == false && publishHosts.isEmpty()) { + // publishHosts = Arrays.asList(boundAddressesHostStrings); + // } + // if (publishHosts.isEmpty()) { + // publishHosts = NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings); + // } + + // final InetAddress publishInetAddress; + // try { + // publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts.toArray(Strings.EMPTY_ARRAY)); + // } catch (Exception e) { + // throw new BindTransportException("Failed to resolve publish address", e); + // } + + // final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); + // final ProtobufTransportAddress publishAddress = new ProtobufTransportAddress(new InetSocketAddress(publishInetAddress, + // publishPort)); + // return new ProtobufBoundTransportAddress(transportBoundAddresses, publishAddress); + } + // package private for tests static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { int publishPort = profileSettings.publishPort; @@ -1022,20 +1074,8 @@ public final RequestHandlers getRequestHandlers() { @Override public void setMessageListener(ProtobufTransportMessageListener listener) { - // TODO Auto-generated method stub - throw new UnsupportedOperationException("Unimplemented method 'setMessageListener'"); - } - - @Override - public ProtobufBoundTransportAddress boundProtobufAddress() { - // TODO Auto-generated method stub - throw new UnsupportedOperationException("Unimplemented method 'boundProtobufAddress'"); - } - - @Override - public Map profileProtobufBoundAddresses() { - // TODO Auto-generated method stub - throw new UnsupportedOperationException("Unimplemented method 'profileProtobufBoundAddresses'"); + outboundHandler.setProtobufMessageListener(listener); + inboundHandler.setProtobufMessageListener(listener); } @Override @@ -1045,8 +1085,7 @@ public ProtobufTransportAddress[] addressesFromStringProtobuf(String address) th } @Override - public void openProtobufConnection(ProtobufDiscoveryNode node, ProtobufConnectionProfile profile, - ActionListener listener) { + public void openProtobufConnection(DiscoveryNode node, ProtobufConnectionProfile profile, ActionListener listener) { // TODO Auto-generated method stub throw new UnsupportedOperationException("Unimplemented method 'openProtobufConnection'"); } diff --git a/server/src/main/java/org/opensearch/transport/Transport.java b/server/src/main/java/org/opensearch/transport/Transport.java index 079934562a03d..09c8cc1334147 100644 --- a/server/src/main/java/org/opensearch/transport/Transport.java +++ b/server/src/main/java/org/opensearch/transport/Transport.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.ProtobufDiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.transport.BoundTransportAddress; @@ -119,7 +119,6 @@ default boolean isSecure() { */ ProtobufTransportAddress[] addressesFromStringProtobuf(String address) throws UnknownHostException; - /** * Returns a list of 
all local addresses for this transport */ @@ -136,7 +135,7 @@ default boolean isSecure() { * The ActionListener will be called on the calling thread or the generic thread pool. */ void openProtobufConnection( - ProtobufDiscoveryNode node, + DiscoveryNode node, ProtobufConnectionProfile profile, ActionListener listener ); @@ -204,13 +203,13 @@ default Object getCacheKey() { } /** - * A unidirectional connection to a {@link ProtobufDiscoveryNode} + * A unidirectional connection to a {@link DiscoveryNode} */ interface ProtobufConnection extends Closeable { /** * The node this connection is associated with */ - ProtobufDiscoveryNode getNode(); + DiscoveryNode getNode(); /** * Sends the request to the node this connection is associated with @@ -484,8 +483,8 @@ final class RequestHandlers { private volatile Map> requestHandlers = Collections.emptyMap(); - private volatile Map> protobufRequestHandlers = Collections - .emptyMap(); + private volatile Map> protobufRequestHandlers = + Collections.emptyMap(); synchronized void registerHandler(RequestHandlerRegistry reg) { if (requestHandlers.containsKey(reg.getAction())) { diff --git a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java index a284269dc4151..27832c3c633ec 100644 --- a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java @@ -49,6 +49,8 @@ import org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; @@ -57,6 +59,10 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectionProfile; +import org.opensearch.transport.ProtobufConnectionProfile; +import org.opensearch.transport.ProtobufTransportMessageListener; +import org.opensearch.transport.ProtobufTransportRequest; +import org.opensearch.transport.ProtobufTransportStats; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportMessageListener; @@ -67,6 +73,7 @@ import org.junit.After; import org.junit.Before; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -592,6 +599,8 @@ public void connectToNode(DiscoveryNode node, ActionListener listener) thr private static final class MockTransport implements Transport { private final ResponseHandlers responseHandlers = new ResponseHandlers(); private final RequestHandlers requestHandlers = new RequestHandlers(); + private final ProtobufResponseHandlers protobufResponseHandlers = new ProtobufResponseHandlers(); + private final ProtobufRequestHandlers protobufRequestHandlers = new ProtobufRequestHandlers(); private volatile boolean randomConnectionExceptions = false; private final ThreadPool threadPool; @@ -685,5 +694,75 @@ public ResponseHandlers getResponseHandlers() { public RequestHandlers getRequestHandlers() { return requestHandlers; } + + @Override + public void 
setMessageListener(ProtobufTransportMessageListener listener) {} + + @Override + public ProtobufBoundTransportAddress boundProtobufAddress() { + return null; + } + + @Override + public Map profileProtobufBoundAddresses() { + return null; + } + + @Override + public ProtobufTransportAddress[] addressesFromStringProtobuf(String address) throws UnknownHostException { + return new ProtobufTransportAddress[0]; + } + + @Override + public void openProtobufConnection( + DiscoveryNode node, + ProtobufConnectionProfile profile, + ActionListener listener + ) { + if (profile == null && randomConnectionExceptions && randomBoolean()) { + threadPool.generic().execute(() -> listener.onFailure(new ConnectTransportException(node, "simulated"))); + } else { + threadPool.generic().execute(() -> listener.onResponse(new ProtobufConnection() { + @Override + public DiscoveryNode getNode() { + return node; + } + + @Override + public void sendRequest( + long requestId, + String action, + ProtobufTransportRequest request, + TransportRequestOptions options + ) throws TransportException {} + + @Override + public void addCloseListener(ActionListener listener) {} + + @Override + public void close() {} + + @Override + public boolean isClosed() { + return false; + } + })); + } + } + + @Override + public ProtobufTransportStats getProtobufStats() { + throw new UnsupportedOperationException(); + } + + @Override + public ProtobufResponseHandlers getProtobufResponseHandlers() { + return protobufResponseHandlers; + } + + @Override + public ProtobufRequestHandlers getProtobufRequestHandlers() { + return protobufRequestHandlers; + } } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoOpClusterApplier.java b/server/src/test/java/org/opensearch/cluster/coordination/NoOpClusterApplier.java index 9b865ace3b082..420223fa35178 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoOpClusterApplier.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoOpClusterApplier.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.coordination; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ProtobufClusterState; import org.opensearch.cluster.service.ClusterApplier; import java.util.function.Supplier; @@ -46,4 +47,9 @@ public void setInitialState(ClusterState initialState) { public void onNewClusterState(String source, Supplier clusterStateSupplier, ClusterApplyListener listener) { listener.onSuccess(source); } + + @Override + public void setInitialProtobufState(ProtobufClusterState initialState) { + + } } diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index f795df2f48b22..f52efbe0c5848 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -45,6 +45,8 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpStats; import org.opensearch.http.NullDispatcher; +import org.opensearch.http.ProtobufHttpInfo; +import org.opensearch.http.ProtobufHttpStats; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.test.OpenSearchTestCase; @@ -103,6 +105,16 @@ public HttpInfo info() { public HttpStats stats() { return null; } + + @Override + public ProtobufHttpInfo protobufInfo() { + return null; + } + + @Override + public ProtobufHttpStats 
protobufStats() { + return null; + } } public void testRegisterTransport() { diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index 25a8637479ec5..1d54ff6012da9 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -54,6 +54,8 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpStats; import org.opensearch.identity.IdentityService; +import org.opensearch.http.ProtobufHttpInfo; +import org.opensearch.http.ProtobufHttpStats; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.rest.action.admin.indices.RestCreateIndexAction; import org.opensearch.test.OpenSearchTestCase; @@ -228,7 +230,7 @@ public void testRegisterAsDeprecatedHandler() { controller.registerAsDeprecatedHandler(method, path, handler, deprecationMessage); - verify(controller).registerProtobufHandler(eq(method), eq(path), any(DeprecationRestHandler.class)); + verify(controller).registerHandler(eq(method), eq(path), any(DeprecationRestHandler.class)); } public void testRegisterWithDeprecatedHandler() { @@ -255,7 +257,7 @@ public void testRegisterWithDeprecatedHandler() { controller.registerWithDeprecatedHandler(method, path, handler, deprecatedMethod, deprecatedPath); - verify(controller).registerProtobufHandler(method, path, handler); + verify(controller).registerHandler(method, path, handler); verify(controller).registerAsDeprecatedHandler(deprecatedMethod, deprecatedPath, handler, deprecationMessage); } @@ -691,6 +693,16 @@ public HttpInfo info() { public HttpStats stats() { return null; } + + @Override + public ProtobufHttpInfo protobufInfo() { + return null; + } + + @Override + public ProtobufHttpStats protobufStats() { + return null; + } } public static final class AssertingChannel extends AbstractRestChannel { diff --git a/server/src/test/java/org/opensearch/test/NoopDiscovery.java b/server/src/test/java/org/opensearch/test/NoopDiscovery.java index f7365d7d71693..5779ebd03aabd 100644 --- a/server/src/test/java/org/opensearch/test/NoopDiscovery.java +++ b/server/src/test/java/org/opensearch/test/NoopDiscovery.java @@ -37,6 +37,7 @@ import org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.discovery.Discovery; import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.discovery.ProtobufDiscoveryStats; public class NoopDiscovery implements Discovery { @@ -78,4 +79,9 @@ public void stop() {} @Override public void close() {} + + @Override + public ProtobufDiscoveryStats protobufStats() { + return null; + } } diff --git a/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java b/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java index e156449adc184..51a9ee248e1ca 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java @@ -34,10 +34,14 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.ProtobufBoundTransportAddress; +import org.opensearch.common.transport.ProtobufTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.http.HttpInfo; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpStats; +import 
org.opensearch.http.ProtobufHttpInfo;
+import org.opensearch.http.ProtobufHttpStats;
 import org.opensearch.plugins.Plugin;

 /**
@@ -57,8 +61,18 @@ public static class TestPlugin extends Plugin {}
         new TransportAddress[] { DUMMY_TRANSPORT_ADDRESS },
         DUMMY_TRANSPORT_ADDRESS
     );
+    private static final ProtobufTransportAddress DUMMY_PROTOBUF_TRANSPORT_ADDRESS = new ProtobufTransportAddress(
+        ProtobufTransportAddress.META_ADDRESS,
+        0
+    );
+    private static final ProtobufBoundTransportAddress DUMMY_PROTOBUF_BOUND_ADDRESS = new ProtobufBoundTransportAddress(
+        new ProtobufTransportAddress[] { DUMMY_PROTOBUF_TRANSPORT_ADDRESS },
+        DUMMY_PROTOBUF_TRANSPORT_ADDRESS
+    );
     private static final HttpInfo DUMMY_HTTP_INFO = new HttpInfo(DUMMY_BOUND_ADDRESS, 0);
     private static final HttpStats DUMMY_HTTP_STATS = new HttpStats(0, 0);
+    private static final ProtobufHttpInfo DUMMY_PROTOBUF_HTTP_INFO = new ProtobufHttpInfo(DUMMY_PROTOBUF_BOUND_ADDRESS, 0);
+    private static final ProtobufHttpStats DUMMY_PROTOBUF_HTTP_STATS = new ProtobufHttpStats(0, 0);

     @Override
     protected void doStart() {}
@@ -83,4 +97,14 @@ public HttpInfo info() {
     public HttpStats stats() {
         return DUMMY_HTTP_STATS;
     }
+
+    @Override
+    public ProtobufHttpInfo protobufInfo() {
+        return DUMMY_PROTOBUF_HTTP_INFO;
+    }
+
+    @Override
+    public ProtobufHttpStats protobufStats() {
+        return DUMMY_PROTOBUF_HTTP_STATS;
+    }
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java
index b70a143cff948..42e9ae078dae8 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java
@@ -34,7 +34,6 @@
 import org.opensearch.action.ActionListener;
 import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
 import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
 import org.opensearch.common.transport.BoundTransportAddress;
 import org.opensearch.common.transport.ProtobufBoundTransportAddress;
@@ -163,8 +163,7 @@ public ProtobufTransportAddress[] addressesFromStringProtobuf(String address) th
     }

     @Override
-    public void openProtobufConnection(ProtobufDiscoveryNode node, ProtobufConnectionProfile profile,
-        ActionListener<ProtobufConnection> listener) {
+    public void openProtobufConnection(DiscoveryNode node, ProtobufConnectionProfile profile, ActionListener<ProtobufConnection> listener) {
         // TODO Auto-generated method stub
         throw new UnsupportedOperationException("Unimplemented method 'openProtobufConnection'");
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java
index 9770e9e93d1e6..11ef579a3ac42 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java
@@ -35,7 +35,6 @@
 import org.opensearch.Version;
 import org.opensearch.action.ActionListener;
 import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.cluster.node.ProtobufDiscoveryNode;
 import org.opensearch.common.lifecycle.Lifecycle;
 import org.opensearch.common.lifecycle.LifecycleListener;
 import org.opensearch.common.transport.BoundTransportAddress;
@@ -342,8 +342,7 @@ public ProtobufTransportAddress[] 
addressesFromStringProtobuf(String address) th } @Override - public void openProtobufConnection(ProtobufDiscoveryNode node, ProtobufConnectionProfile profile, - ActionListener listener) { + public void openProtobufConnection(DiscoveryNode node, ProtobufConnectionProfile profile, ActionListener listener) { // TODO Auto-generated method stub throw new UnsupportedOperationException("Unimplemented method 'openProtobufConnection'"); } From e6f61909a48175bd4b539bed93f8fd99c1b39735 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 10 Jul 2023 22:31:12 +0000 Subject: [PATCH 12/37] ProtobufClusterState to ClusterState Signed-off-by: Vacha Shah --- .../datastream/GetDataStreamAction.java | 10 ++- .../admin/indices/rollover/RolloverInfo.java | 10 ++- .../support/ProtobufTransportAction.java | 2 +- ...obufTransportClusterManagerNodeAction.java | 15 ++-- .../TransportClusterManagerNodeAction.java | 5 ++ .../opensearch/cluster/AbstractDiffable.java | 68 ++++++++++++++++++- .../cluster/ProtobufAbstractDiffable.java | 4 +- .../cluster/ProtobufClusterChangedEvent.java | 9 +-- .../cluster/ProtobufClusterState.java | 33 ++++----- .../opensearch/cluster/ProtobufDiffable.java | 2 +- .../cluster/ProtobufDiffableUtils.java | 2 +- .../cluster/block/ClusterBlocks.java | 10 ++- .../cluster/coordination/Coordinator.java | 6 +- .../cluster/metadata/AliasMetadata.java | 10 ++- .../cluster/metadata/ComponentTemplate.java | 10 ++- .../metadata/ComposableIndexTemplate.java | 10 ++- .../cluster/metadata/DataStream.java | 10 ++- .../metadata/IndexTemplateMetadata.java | 10 ++- .../cluster/metadata/MappingMetadata.java | 10 ++- .../opensearch/cluster/metadata/Template.java | 10 ++- .../cluster/node/DiscoveryNodes.java | 48 ++++++++++++- .../cluster/node/ProtobufDiscoveryNodes.java | 2 +- .../cluster/routing/IndexRoutingTable.java | 9 ++- .../ingest/PipelineConfiguration.java | 11 ++- .../main/java/org/opensearch/node/Node.java | 5 ++ .../action/cat/ProtobufRestNodesAction.java | 4 +- .../opensearch/script/StoredScriptSource.java | 15 +++- .../pipeline/PipelineConfiguration.java | 10 ++- .../org/opensearch/tasks/TaskManager.java | 19 ++---- .../ProtobufRemoteClusterConnection.java | 4 +- 30 files changed, 308 insertions(+), 65 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java index 543783b3de367..bab12921aa42e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java @@ -64,6 +64,8 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -166,7 +168,7 @@ public static class Response extends ActionResponse implements ToXContentObject * * @opensearch.internal */ - public static class DataStreamInfo extends AbstractDiffable implements ToXContentObject { + public static class DataStreamInfo extends AbstractDiffable implements ToXContentObject { public static final ParseField STATUS_FIELD = new ParseField("status"); public static final ParseField INDEX_TEMPLATE_FIELD = new ParseField("template"); @@ -235,6 +237,12 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(dataStream, dataStreamStatus, indexTemplate); } + + 
@Override
+        public void writeTo(CodedOutputStream out) throws IOException {
+            // TODO Auto-generated method stub
+            throw new UnsupportedOperationException("Unimplemented method 'writeTo'");
+        }
     }

     private final List<DataStreamInfo> dataStreams;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java
index f58c842be374b..b21fbdbc29d99 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java
@@ -45,6 +45,8 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;

+import com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
@@ -54,7 +56,7 @@
  *
  * @opensearch.internal
  */
-public class RolloverInfo extends AbstractDiffable<RolloverInfo> implements Writeable, ToXContentFragment {
+public class RolloverInfo extends AbstractDiffable<RolloverInfo, RolloverInfo> implements Writeable, ToXContentFragment {

     public static final ParseField CONDITION_FIELD = new ParseField("met_conditions");
     public static final ParseField TIME_FIELD = new ParseField("time");
@@ -151,4 +153,10 @@ public boolean equals(Object obj) {
     public String toString() {
         return Strings.toString(XContentType.JSON, this);
     }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        // TODO Auto-generated method stub
+        throw new UnsupportedOperationException("Unimplemented method 'writeTo'");
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java b/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java
index 6f0356fb0dabc..b23b7c8e78106 100644
--- a/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java
+++ b/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java
@@ -48,7 +48,7 @@ protected ProtobufTransportAction(String actionName, ProtobufActionFilters actio

     private Releasable registerChildNode(ProtobufTaskId parentTask) {
         if (parentTask.isSet()) {
-            return taskManager.registerProtobufChildNode(parentTask.getId(), taskManager.localProtobufNode());
+            return taskManager.registerProtobufChildNode(parentTask.getId(), taskManager.localNode());
         } else {
             return () -> {};
         }
diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java
index 55c915522b404..426af1f265898 100644
--- a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java
@@ -30,7 +30,6 @@
 import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodes;
-import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
 import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
 import org.opensearch.cluster.service.ClusterManagerThrottlingException;
 import org.opensearch.cluster.service.ClusterService;
@@ -215,9 +215,10 @@ public Exception getTimeoutException(Exception e) {

     protected void doStart(ProtobufClusterState 
clusterState) {
         try {
-            final ProtobufDiscoveryNodes nodes = clusterState.nodes();
-            final DiscoveryNodes discoveryNodes = clusterService.state().nodes();
-            if (discoveryNodes.isLocalNodeElectedClusterManager() || localExecute(request)) {
+            logger.trace("ProtobufTransportClusterManagerNodeAction.doStart");
+            // TODO: derive the node view from the protobuf cluster state once DiscoveryNodes is fully protobuf-serializable
+            final DiscoveryNodes nodes = clusterService.state().nodes();
+            if (nodes.isLocalNodeElectedClusterManager() || localExecute(request)) {
                 // check for block, if blocked, retry, else, execute locally
                 final ClusterBlockException blockException = checkBlock(request, clusterState);
                 if (blockException != null) {
diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java
index 13c576bd120c7..5c00957f07d79 100644
--- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java
@@ -231,8 +231,9 @@
     protected void doStart(ClusterState clusterState) {
         try {
+            logger.trace("TransportClusterManagerNodeAction.doStart");
             final DiscoveryNodes nodes = clusterState.nodes();
             if (nodes.isLocalNodeElectedClusterManager() || localExecute(request)) {
                 // check for block, if blocked, retry, else, execute locally
                 final ClusterBlockException blockException = checkBlock(request, clusterState);
                 if (blockException != null) {
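Before the serialization changes below, it helps to see the wire shape the Complete*Diff classes use: a single boolean ("did anything change?") followed by the full object only when there was a change. A minimal, self-contained round-trip of that scheme, with a plain string standing in for a real diffable part (the demo class is illustrative; the CodedInputStream/CodedOutputStream calls match the ones used in the hunks that follow):

    import com.google.protobuf.CodedInputStream;
    import com.google.protobuf.CodedOutputStream;

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public final class CompleteDiffWireDemo {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            CodedOutputStream out = CodedOutputStream.newInstance(bytes);
            out.writeBoolNoTag(true);              // a change is present
            out.writeStringNoTag("cluster-state"); // stand-in for part.writeTo(out)
            out.flush();

            CodedInputStream in = CodedInputStream.newInstance(bytes.toByteArray());
            if (in.readBool()) {
                System.out.println("changed: " + in.readString()); // stand-in for reader.read(in)
            } else {
                System.out.println("no change; reuse the previous state");
            }
        }
    }

Shipping the whole object behind one boolean is the coarse fallback these Complete* diffs provide when no finer-grained per-field diff is implemented.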
diff --git a/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java b/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java
index 74af3472433ba..4f49ac5a0f7eb 100644
--- a/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java
+++ b/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java
@@ -33,9 +33,13 @@
 package org.opensearch.cluster;

 import org.opensearch.common.Nullable;
+import org.opensearch.common.io.stream.ProtobufWriteable;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;

+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;

 /**
@@ -44,7 +48,8 @@
  *
  * @opensearch.internal
  */
-public abstract class AbstractDiffable<T extends Diffable<T>> implements Diffable<T> {
+public abstract class AbstractDiffable<T extends Diffable<T>, V extends ProtobufDiffable<V>> implements Diffable<T>, ProtobufDiffable<V> {

     private static final Diff<?> EMPTY = new CompleteDiff<>();
+    private static final ProtobufDiff<?> PROTOBUF_EMPTY = new CompleteDiffProtobuf<>();
@@ -58,6 +62,16 @@ public Diff<T> diff(T previousState) {
         }
     }

+    @SuppressWarnings("unchecked")
+    @Override
+    public ProtobufDiff<V> protobufDiff(V previousState) {
+        if (this.equals(previousState)) {
+            return (ProtobufDiff<V>) PROTOBUF_EMPTY;
+        } else {
+            return new CompleteDiffProtobuf<>((V) this);
+        }
+    }
+
     @SuppressWarnings("unchecked")
     public static <T extends Diffable<T>> Diff<T> readDiffFrom(Reader<T> reader, StreamInput in) throws IOException {
         if (in.readBoolean()) {
@@ -66,6 +80,14 @@ public static <T extends Diffable<T>> Diff<T> readDiffFrom(Reader<T> reader, Str
         return (Diff<T>) EMPTY;
     }

+    @SuppressWarnings("unchecked")
+    public static <T extends ProtobufDiffable<T>> ProtobufDiff<T> readDiffFromProtobuf(ProtobufWriteable.Reader<T> reader, CodedInputStream in) throws IOException {
+        if (in.readBool()) {
+            return new CompleteDiffProtobuf<>(reader.read(in));
+        }
+        return (ProtobufDiff<T>) PROTOBUF_EMPTY;
+    }
+
     /**
      * A complete diff.
      *
      * @opensearch.internal
      */
@@ -109,4 +131,48 @@ public T apply(T part) {
             }
         }
     }
+
+    /**
+     * A complete protobuf diff.
+     *
+     * @opensearch.internal
+     */
+    private static class CompleteDiffProtobuf<T extends ProtobufDiffable<T>> implements ProtobufDiff<T> {
+
+        @Nullable
+        private final T part;
+
+        /**
+         * Creates simple diff with changes
+         */
+        CompleteDiffProtobuf(T part) {
+            this.part = part;
+        }
+
+        /**
+         * Creates simple diff without changes
+         */
+        CompleteDiffProtobuf() {
+            this.part = null;
+        }
+
+        @Override
+        public void writeTo(CodedOutputStream out) throws IOException {
+            if (part != null) {
+                out.writeBoolNoTag(true);
+                part.writeTo(out);
+            } else {
+                out.writeBoolNoTag(false);
+            }
+        }
+
+        @Override
+        public T apply(T part) {
+            if (this.part != null) {
+                return this.part;
+            } else {
+                return part;
+            }
+        }
+    }
 }
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufAbstractDiffable.java b/server/src/main/java/org/opensearch/cluster/ProtobufAbstractDiffable.java
index 9af03c8bc9463..3e90bb2a7d377 100644
--- a/server/src/main/java/org/opensearch/cluster/ProtobufAbstractDiffable.java
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufAbstractDiffable.java
@@ -10,6 +10,7 @@
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
+
 import org.opensearch.common.Nullable;

 import java.io.IOException;
@@ -26,7 +28,7 @@ public abstract class ProtobufAbstractDiffable<T extends ProtobufDiffable<T>> im
     @SuppressWarnings("unchecked")
     @Override
-    public ProtobufDiff<T> diff(T previousState) {
+    public ProtobufDiff<T> protobufDiff(T previousState) {
         if (this.equals(previousState)) {
             return (ProtobufDiff<T>) EMPTY;
         } else {
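Most of the writeTo(CodedOutputStream) overrides added throughout this commit are still UnsupportedOperationException stubs. One possible way such a stub could eventually be filled in, sketched against the RolloverInfo fields (alias, time); the helper class, field order, and encoding are assumptions for illustration, not this PR's final wire format:

    import com.google.protobuf.CodedOutputStream;

    import java.io.IOException;

    // Hypothetical fleshed-out version of one of the writeTo(CodedOutputStream) stubs.
    final class RolloverInfoWireSketch {
        private final String alias;
        private final long time;

        RolloverInfoWireSketch(String alias, long time) {
            this.alias = alias;
            this.time = time;
        }

        public void writeTo(CodedOutputStream out) throws IOException {
            out.writeStringNoTag(alias); // rollover target alias
            out.writeInt64NoTag(time);   // rollover timestamp in millis
            // met_conditions would need its own tagged encoding, mirroring the
            // named-writeable list handling on the StreamOutput side.
        }
    }

diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java
index 59f6f7bfeb29e..eab4bade647fe 100644
--- a/server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterChangedEvent.java
@@ -12,7 +12,7 @@
 import org.opensearch.cluster.metadata.IndexGraveyard.IndexGraveyardDiff;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Metadata;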
-import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
+import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.gateway.GatewayService;
 import org.opensearch.index.Index;
@@ -38,7 +39,7 @@ public class ProtobufClusterChangedEvent {

     private final ProtobufClusterState state;

-    private final ProtobufDiscoveryNodes.Delta nodesDelta;
+    private final DiscoveryNodes.Delta nodesDelta;

     public ProtobufClusterChangedEvent(String source, ProtobufClusterState state, ProtobufClusterState previousState) {
         Objects.requireNonNull(source, "source must not be null");
@@ -201,10 +202,10 @@ public boolean localNodeMaster() {
     }

     /**
-     * Returns the {@link org.opensearch.cluster.node.ProtobufDiscoveryNodes.Delta} between
+     * Returns the {@link org.opensearch.cluster.node.DiscoveryNodes.Delta} between
      * the previous cluster state and the new cluster state.
      */
-    public ProtobufDiscoveryNodes.Delta nodesDelta() {
+    public DiscoveryNodes.Delta nodesDelta() {
         return this.nodesDelta;
     }
diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java b/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
index d1faa196a8dd3..b2354e5714494 100644
--- a/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
+++ b/server/src/main/java/org/opensearch/cluster/ProtobufClusterState.java
@@ -25,8 +25,8 @@
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
+import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.routing.IndexRoutingTable;
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
 import org.opensearch.cluster.routing.RoutingNodes;
@@ -151,7 +152,7 @@ default boolean isPrivate() {

     private final RoutingTable routingTable;

-    private final ProtobufDiscoveryNodes nodes;
+    private final DiscoveryNodes nodes;

     private final Metadata metadata;

@@ -189,7 +190,7 @@ public ProtobufClusterState(
         String stateUUID,
         Metadata metadata,
         RoutingTable routingTable,
-        ProtobufDiscoveryNodes nodes,
+        DiscoveryNodes nodes,
         ClusterBlocks blocks,
         ImmutableOpenMap<String, Custom> customs,
         int minimumClusterManagerNodesOnPublishingClusterManager,
@@ -234,11 +235,11 @@ public String stateUUID() {
         return this.stateUUID;
     }

-    public ProtobufDiscoveryNodes nodes() {
+    public DiscoveryNodes nodes() {
         return this.nodes;
     }

-    public ProtobufDiscoveryNodes getNodes() {
+    public DiscoveryNodes getNodes() {
         return nodes();
     }

@@ -578,7 +579,7 @@ public static class Builder {
         private String uuid = UNKNOWN_UUID;
         private Metadata metadata = Metadata.EMPTY_METADATA;
         private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE;
-        private ProtobufDiscoveryNodes nodes = ProtobufDiscoveryNodes.EMPTY_NODES;
+        private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES;
         private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK;
         private final ImmutableOpenMap.Builder<String, Custom> customs;
         private boolean fromDiff;
@@ -602,16 +603,16 @@ public Builder(ClusterName clusterName) {
             this.clusterName = clusterName;
         }

-        public Builder nodes(ProtobufDiscoveryNodes.Builder nodesBuilder) {
+        public Builder nodes(DiscoveryNodes.Builder nodesBuilder) {
             return nodes(nodesBuilder.build());
         }

-        public Builder nodes(ProtobufDiscoveryNodes nodes) {
+        public Builder nodes(DiscoveryNodes 
nodes) { this.nodes = nodes; return this; } - public ProtobufDiscoveryNodes nodes() { + public DiscoveryNodes nodes() { return nodes; } @@ -717,7 +718,7 @@ public ProtobufClusterState build() { } @Override - public ProtobufDiff diff(ProtobufClusterState previousState) { + public ProtobufDiff protobufDiff(ProtobufClusterState previousState) { return new ClusterStateDiff(previousState, this); } @@ -733,7 +734,7 @@ public static ProtobufClusterState readFrom(CodedInputStream in, DiscoveryNode l builder.uuid = in.readString(); // builder.metadata = Metadata.readFrom(in); // builder.routingTable = RoutingTable.readFrom(in); - // builder.nodes = ProtobufDiscoveryNodes.readFrom(in, localNode); + builder.nodes = DiscoveryNodes.readFromProtobuf(in, localNode); // builder.blocks = ClusterBlocks.readFrom(in); int customSize = in.readInt32(); for (int i = 0; i < customSize; i++) { @@ -752,7 +753,7 @@ public void writeTo(CodedOutputStream out) throws IOException { out.writeStringNoTag(stateUUID); // metadata.writeTo(out); // routingTable.writeTo(out); - // nodes.writeTo(out); + nodes.writeTo(out); // blocks.writeTo(out); // filter out custom states not supported by the other node int numberOfCustoms = 0; @@ -787,7 +788,7 @@ private static class ClusterStateDiff implements ProtobufDiff routingTable; - private final ProtobufDiff nodes; + private final ProtobufDiff nodes; private final ProtobufDiff metadata; @@ -803,7 +804,7 @@ private static class ClusterStateDiff implements ProtobufDiff extends ProtobufWriteable { /** * Returns serializable object representing differences between this and previousState */ - ProtobufDiff diff(T previousState); + ProtobufDiff protobufDiff(T previousState); } diff --git a/server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java b/server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java index 32eecd526f997..fef838ff6eefa 100644 --- a/server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java +++ b/server/src/main/java/org/opensearch/cluster/ProtobufDiffableUtils.java @@ -713,7 +713,7 @@ public boolean supportsDiffableValues() { @Override public ProtobufDiff diff(V value, V beforePart) { - return value.diff(beforePart); + return value.protobufDiff(beforePart); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index 33d55e6e747a3..d5295aae148bc 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -32,6 +32,8 @@ package org.opensearch.cluster.block; +import com.google.protobuf.CodedOutputStream; + import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.IndexMetadata; @@ -61,7 +63,7 @@ * * @opensearch.internal */ -public class ClusterBlocks extends AbstractDiffable { +public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), Map.of()); private final Set global; @@ -478,4 +480,10 @@ public ClusterBlocks build() { return new ClusterBlocks(unmodifiableSet(new HashSet<>(global)), indicesBuilder); } } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } } diff --git 
a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
index ba2e4560beec1..3df16442b9fa7 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
@@ -42,7 +42,6 @@
 import org.opensearch.cluster.ClusterStateTaskConfig;
 import org.opensearch.cluster.ClusterStateUpdateTask;
 import org.opensearch.cluster.LocalClusterUpdateTask;
-import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ProtobufClusterState;
 import org.opensearch.cluster.block.ClusterBlocks;
 import org.opensearch.cluster.coordination.ClusterFormationFailureHelper.ClusterFormationState;
@@ -54,7 +53,6 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodes;
-import org.opensearch.cluster.node.ProtobufDiscoveryNodes;
 import org.opensearch.cluster.routing.RerouteService;
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.service.ClusterApplier;
@@ -861,8 +859,8 @@ protected void doStart() {
                     .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)
                     .addGlobalBlock(noClusterManagerBlockService.getNoClusterManagerBlock())
             )
-            .nodes(ProtobufDiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId()))
+            .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId()))
             .build();
         applierState = initialState;
         clusterApplier.setInitialState(initialState);
         clusterApplier.setInitialProtobufState(protobufInitialState);
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java
index 27ecca0358bd8..05ea4ff948386 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java
@@ -51,6 +51,8 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;

+import com.google.protobuf.CodedOutputStream;
+
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
@@ -64,7 +66,7 @@
  *
  * @opensearch.internal
  */
-public class AliasMetadata extends AbstractDiffable<AliasMetadata> implements ToXContentFragment {
+public class AliasMetadata extends AbstractDiffable<AliasMetadata, AliasMetadata> implements ToXContentFragment {

     private final String alias;

@@ -431,4 +433,10 @@ public static AliasMetadata fromXContent(XContentParser parser) throws IOExcepti
             return builder.build();
         }
     }
+
+    @Override
+    public void writeTo(CodedOutputStream out) throws IOException {
+        // TODO Auto-generated method stub
+        throw new UnsupportedOperationException("Unimplemented method 'writeTo'");
+    }
 }
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java
index 52096422248a5..03aedc2635e20 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java
@@ -45,6 +45,8 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;

+import com.google.protobuf.CodedOutputStream;
+ import java.io.IOException; import java.util.Map; import java.util.Objects; @@ -57,7 +59,7 @@ * * @opensearch.internal */ -public class ComponentTemplate extends AbstractDiffable implements ToXContentObject { +public class ComponentTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); private static final ParseField VERSION = new ParseField("version"); private static final ParseField METADATA = new ParseField("_meta"); @@ -168,4 +170,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 15e5cb5873719..b7d7ffecd5001 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -49,6 +49,8 @@ import org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.MapperService; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.Collections; import java.util.List; @@ -65,7 +67,7 @@ * * @opensearch.internal */ -public class ComposableIndexTemplate extends AbstractDiffable implements ToXContentObject { +public class ComposableIndexTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns"); private static final ParseField TEMPLATE = new ParseField("template"); private static final ParseField PRIORITY = new ParseField("priority"); @@ -358,4 +360,10 @@ public int hashCode() { return Objects.hash(timestampField); } } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java index 76e1a0197bc1c..391f005e224d1 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java @@ -47,6 +47,8 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.index.Index; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -61,7 +63,7 @@ * * @opensearch.internal */ -public final class DataStream extends AbstractDiffable implements ToXContentObject { +public final class DataStream extends AbstractDiffable implements ToXContentObject { public static final String BACKING_INDEX_PREFIX = ".ds-"; public static final String TIMESERIES_FIELDNAME = "@timestamp"; @@ -318,4 +320,10 @@ public int hashCode() { return Objects.hash(name); } } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java 
b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index e430ba5465499..bb286cde40781 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -31,6 +31,8 @@ package org.opensearch.cluster.metadata; +import com.google.protobuf.CodedOutputStream; + import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -66,7 +68,7 @@ * * @opensearch.internal */ -public class IndexTemplateMetadata extends AbstractDiffable { +public class IndexTemplateMetadata extends AbstractDiffable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexTemplateMetadata.class); @@ -542,4 +544,10 @@ private static String skipTemplateName(XContentParser parser) throws IOException } } + @Override + public void writeTo(CodedOutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } + } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index c77d2d6b8b2b3..2074511a1f075 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -45,6 +45,8 @@ import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.io.UncheckedIOException; import java.util.Collections; @@ -58,7 +60,7 @@ * * @opensearch.internal */ -public class MappingMetadata extends AbstractDiffable { +public class MappingMetadata extends AbstractDiffable { public static final MappingMetadata EMPTY_MAPPINGS = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Collections.emptyMap()); private final String type; @@ -191,4 +193,10 @@ public MappingMetadata(StreamInput in) throws IOException { public static Diff readDiffFrom(StreamInput in) throws IOException { return readDiffFrom(MappingMetadata::new, in); } + + @Override + public void writeTo(CodedOutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Template.java b/server/src/main/java/org/opensearch/cluster/metadata/Template.java index 45d11dd9250e8..326711ffc8274 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Template.java @@ -49,6 +49,8 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.MapperService; +import com.google.protobuf.CodedOutputStream; + import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -61,7 +63,7 @@ * * @opensearch.internal */ -public class Template extends AbstractDiffable