From e146f13a69c8bee87a936416e8998710521518a3 Mon Sep 17 00:00:00 2001 From: Pranshu Shukla <55992439+Pranshu-S@users.noreply.github.com> Date: Thu, 29 Aug 2024 21:12:15 +0530 Subject: [PATCH] Optimize NodeIndicesStats output behind flag (#14454) * Optimize NodeIndicesStats output behind flag Signed-off-by: Pranshu Shukla --- CHANGELOG.md | 1 + .../org/opensearch/nodestats/NodeStatsIT.java | 309 ++++++++++++++ .../admin/indices/stats/CommonStatsFlags.java | 15 + .../opensearch/indices/IndicesService.java | 8 +- .../opensearch/indices/NodeIndicesStats.java | 199 +++++++-- .../admin/cluster/RestNodesStatsAction.java | 1 + .../rest/action/cat/RestNodesAction.java | 1 + .../cluster/node/stats/NodeStatsTests.java | 400 ++++++++++++++++++ 8 files changed, 904 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f8b695205e789..cbfde6a1c1a80 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add concurrent search support for Derived Fields ([#15326](https://github.com/opensearch-project/OpenSearch/pull/15326)) - [Workload Management] Add query group stats constructs ([#15343](https://github.com/opensearch-project/OpenSearch/pull/15343))) - Add runAs to Subject interface and introduce IdentityAwarePlugin extension point ([#14630](https://github.com/opensearch-project/OpenSearch/pull/14630)) +- Optimize NodeIndicesStats output behind flag ([#14454](https://github.com/opensearch-project/OpenSearch/pull/14454)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) diff --git a/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java index f270cb1399072..22c1679babb52 100644 --- a/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java @@ -10,6 +10,9 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; @@ -19,21 +22,35 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.update.UpdateRequest; import org.opensearch.action.update.UpdateResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; +import org.opensearch.indices.NodeIndicesStats; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; import 
org.hamcrest.MatcherAssert; +import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import static java.util.Collections.singletonMap; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -243,6 +260,280 @@ public void testNodeIndicesStatsDocStatusStatsCreateDeleteUpdate() { } } + public void testNodeIndicesStatsDocStatsWithAggregations() { + { // Testing Create + final String INDEX = "create_index"; + final String ID = "id"; + DocStatusStats expectedDocStatusStats = new DocStatusStats(); + + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE).create(true)).actionGet(); + expectedDocStatusStats.inc(response.status()); + + CommonStatsFlags commonStatsFlags = new CommonStatsFlags(); + commonStatsFlags.setIncludeIndicesStatsByLevel(true); + + DocStatusStats docStatusStats = client().admin() + .cluster() + .prepareNodesStats() + .setIndices(commonStatsFlags) + .execute() + .actionGet() + .getNodes() + .get(0) + .getIndices() + .getIndexing() + .getTotal() + .getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + expectedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + } + + /** + * Default behavior - without consideration of request level param on level, the NodeStatsRequest always + * returns ShardStats which is aggregated on the coordinator node when creating the XContent. + */ + public void testNodeIndicesStatsXContentWithoutAggregationOnNodes() { + List testLevels = new ArrayList<>(); + testLevels.add("null"); + testLevels.add(NodeIndicesStats.StatsLevel.NODE.getRestName()); + testLevels.add(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + testLevels.add(NodeIndicesStats.StatsLevel.SHARDS.getRestName()); + testLevels.add("unknown"); + + internalCluster().startNode(); + ensureGreen(); + String indexName = "test1"; + assertAcked( + prepareCreate( + indexName, + clusterService().state().getNodes().getSize(), + Settings.builder().put("number_of_shards", 2).put("number_of_replicas", clusterService().state().getNodes().getSize() - 1) + ) + ); + ensureGreen(); + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + + testLevels.forEach(testLevel -> { + NodesStatsResponse response; + if (!testLevel.equals("null")) { + ArrayList level_arg = new ArrayList<>(); + level_arg.add(testLevel); + + CommonStatsFlags commonStatsFlags = new CommonStatsFlags(); + commonStatsFlags.setLevels(level_arg.toArray(new String[0])); + response = client().admin().cluster().prepareNodesStats().setIndices(commonStatsFlags).get(); + } else { + response = client().admin().cluster().prepareNodesStats().get(); + } + + NodeStats nodeStats = response.getNodes().get(0); + assertNotNull(nodeStats.getIndices().getShardStats(clusterState.metadata().index(indexName).getIndex())); + try { + // Without any param - default is level = nodes + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder = nodeStats.getIndices().toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + Map xContentMap = xContentBuilderToMap(builder); + LinkedHashMap indicesStatsMap = (LinkedHashMap) 
xContentMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.INDICES)); + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.SHARDS)); + + // With param containing level as 'indices', the indices stats are returned + builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder = nodeStats.getIndices() + .toXContent( + builder, + new ToXContent.MapParams(Collections.singletonMap("level", NodeIndicesStats.StatsLevel.INDICES.getRestName())) + ); + builder.endObject(); + + xContentMap = xContentBuilderToMap(builder); + indicesStatsMap = (LinkedHashMap) xContentMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + assertTrue(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.INDICES.getRestName())); + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.SHARDS.getRestName())); + + LinkedHashMap indexLevelStats = (LinkedHashMap) indicesStatsMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + assertTrue(indexLevelStats.containsKey(indexName)); + + // With param containing level as 'shards', the shard stats are returned + builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder = nodeStats.getIndices() + .toXContent( + builder, + new ToXContent.MapParams(Collections.singletonMap("level", NodeIndicesStats.StatsLevel.SHARDS.getRestName())) + ); + builder.endObject(); + + xContentMap = xContentBuilderToMap(builder); + indicesStatsMap = (LinkedHashMap) xContentMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.INDICES.getRestName())); + assertTrue(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.SHARDS.getRestName())); + + LinkedHashMap shardLevelStats = (LinkedHashMap) indicesStatsMap.get(NodeIndicesStats.StatsLevel.SHARDS.getRestName()); + assertTrue(shardLevelStats.containsKey(indexName)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + /** + * Aggregated behavior - to avoid unnecessary IO in the form of shard-stats when not required, we not honor the levels on the + * individual data nodes instead and pre-compute information as required. 
+ */ + public void testNodeIndicesStatsXContentWithAggregationOnNodes() { + List testLevels = new ArrayList<>(); + + testLevels.add(MockStatsLevel.NULL); + testLevels.add(MockStatsLevel.NODE); + testLevels.add(MockStatsLevel.INDICES); + testLevels.add(MockStatsLevel.SHARDS); + + internalCluster().startNode(); + ensureGreen(); + String indexName = "test1"; + assertAcked( + prepareCreate( + indexName, + clusterService().state().getNodes().getSize(), + Settings.builder().put("number_of_shards", 2).put("number_of_replicas", clusterService().state().getNodes().getSize() - 1) + ) + ); + ensureGreen(); + + testLevels.forEach(testLevel -> { + NodesStatsResponse response; + CommonStatsFlags commonStatsFlags = new CommonStatsFlags(); + commonStatsFlags.setIncludeIndicesStatsByLevel(true); + if (!testLevel.equals(MockStatsLevel.NULL)) { + ArrayList level_arg = new ArrayList<>(); + level_arg.add(testLevel.getRestName()); + + commonStatsFlags.setLevels(level_arg.toArray(new String[0])); + } + response = client().admin().cluster().prepareNodesStats().setIndices(commonStatsFlags).get(); + + NodeStats nodeStats = response.getNodes().get(0); + try { + XContentBuilder builder = XContentFactory.jsonBuilder(); + + builder.startObject(); + + if (!testLevel.equals(MockStatsLevel.SHARDS)) { + final XContentBuilder failedBuilder = builder; + assertThrows( + "Expected shard stats in response for generating [SHARDS] field", + AssertionError.class, + () -> nodeStats.getIndices() + .toXContent( + failedBuilder, + new ToXContent.MapParams( + Collections.singletonMap("level", NodeIndicesStats.StatsLevel.SHARDS.getRestName()) + ) + ) + ); + } else { + builder = nodeStats.getIndices() + .toXContent( + builder, + new ToXContent.MapParams(Collections.singletonMap("level", NodeIndicesStats.StatsLevel.SHARDS.getRestName())) + ); + builder.endObject(); + + Map xContentMap = xContentBuilderToMap(builder); + LinkedHashMap indicesStatsMap = (LinkedHashMap) xContentMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + LinkedHashMap indicesStats = (LinkedHashMap) indicesStatsMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + LinkedHashMap shardStats = (LinkedHashMap) indicesStatsMap.get(NodeIndicesStats.StatsLevel.SHARDS.getRestName()); + + assertFalse(shardStats.isEmpty()); + assertNull(indicesStats); + } + + builder = XContentFactory.jsonBuilder(); + builder.startObject(); + + if (!(testLevel.equals(MockStatsLevel.SHARDS) || testLevel.equals(MockStatsLevel.INDICES))) { + final XContentBuilder failedBuilder = builder; + assertThrows( + "Expected shard stats or index stats in response for generating INDICES field", + AssertionError.class, + () -> nodeStats.getIndices() + .toXContent( + failedBuilder, + new ToXContent.MapParams( + Collections.singletonMap("level", NodeIndicesStats.StatsLevel.INDICES.getRestName()) + ) + ) + ); + } else { + builder = nodeStats.getIndices() + .toXContent( + builder, + new ToXContent.MapParams(Collections.singletonMap("level", NodeIndicesStats.StatsLevel.INDICES.getRestName())) + ); + builder.endObject(); + + Map xContentMap = xContentBuilderToMap(builder); + LinkedHashMap indicesStatsMap = (LinkedHashMap) xContentMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + LinkedHashMap indicesStats = (LinkedHashMap) indicesStatsMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + LinkedHashMap shardStats = (LinkedHashMap) indicesStatsMap.get(NodeIndicesStats.StatsLevel.SHARDS.getRestName()); + + switch (testLevel) { + case SHARDS: + case INDICES: + 
assertNull(shardStats); + assertFalse(indicesStats.isEmpty()); + break; + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + public void testNodeIndicesStatsUnknownLevelThrowsException() { + MockStatsLevel testLevel = MockStatsLevel.UNKNOWN; + internalCluster().startNode(); + ensureGreen(); + String indexName = "test1"; + assertAcked( + prepareCreate( + indexName, + clusterService().state().getNodes().getSize(), + Settings.builder().put("number_of_shards", 2).put("number_of_replicas", clusterService().state().getNodes().getSize() - 1) + ) + ); + ensureGreen(); + + NodesStatsResponse response; + CommonStatsFlags commonStatsFlags = new CommonStatsFlags(); + commonStatsFlags.setIncludeIndicesStatsByLevel(true); + ArrayList level_arg = new ArrayList<>(); + level_arg.add(testLevel.getRestName()); + + commonStatsFlags.setLevels(level_arg.toArray(new String[0])); + response = client().admin().cluster().prepareNodesStats().setIndices(commonStatsFlags).get(); + + assertTrue(response.hasFailures()); + assertEquals("Level provided is not supported by NodeIndicesStats", response.failures().get(0).getCause().getCause().getMessage()); + } + + private Map xContentBuilderToMap(XContentBuilder xContentBuilder) { + return XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(); + } + private void assertDocStatusStats() { DocStatusStats docStatusStats = client().admin() .cluster() @@ -273,4 +564,22 @@ private void updateExpectedDocStatusCounter(Exception e) { expectedDocStatusStats.inc(ExceptionsHelper.status(e)); } + private enum MockStatsLevel { + INDICES(NodeIndicesStats.StatsLevel.INDICES.getRestName()), + SHARDS(NodeIndicesStats.StatsLevel.SHARDS.getRestName()), + NODE(NodeIndicesStats.StatsLevel.NODE.getRestName()), + NULL("null"), + UNKNOWN("unknown"); + + private final String restName; + + MockStatsLevel(String restName) { + this.restName = restName; + } + + public String getRestName() { + return restName; + } + } + } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index ca2685e093d3f..04f39d77ce6c8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -67,6 +67,7 @@ public class CommonStatsFlags implements Writeable, Cloneable { // Used for metric CACHE_STATS, to determine which caches to report stats for private EnumSet includeCaches = EnumSet.noneOf(CacheType.class); private String[] levels = new String[0]; + private boolean includeIndicesStatsByLevel = false; /** * @param flags flags to set. If no flags are supplied, default flags will be set. 
@@ -100,6 +101,9 @@ public CommonStatsFlags(StreamInput in) throws IOException { includeCaches = in.readEnumSet(CacheType.class); levels = in.readStringArray(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + includeIndicesStatsByLevel = in.readBoolean(); + } } @Override @@ -124,6 +128,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnumSet(includeCaches); out.writeStringArrayNullable(levels); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(includeIndicesStatsByLevel); + } } /** @@ -262,6 +269,14 @@ public boolean includeSegmentFileSizes() { return this.includeSegmentFileSizes; } + public void setIncludeIndicesStatsByLevel(boolean includeIndicesStatsByLevel) { + this.includeIndicesStatsByLevel = includeIndicesStatsByLevel; + } + + public boolean getIncludeIndicesStatsByLevel() { + return this.includeIndicesStatsByLevel; + } + public boolean isSet(Flag flag) { return flags.contains(flag); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index a78328e24c588..be16d4ea184fa 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -693,8 +693,12 @@ public NodeIndicesStats stats(CommonStatsFlags flags) { break; } } - - return new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats); + if (flags.getIncludeIndicesStatsByLevel()) { + NodeIndicesStats.StatsLevel statsLevel = NodeIndicesStats.getAcceptedLevel(flags.getLevels()); + return new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats, statsLevel); + } else { + return new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats); + } } Map> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) { diff --git a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java index 35b6fd395ee12..83a759cdb71c5 100644 --- a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java +++ b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java @@ -32,6 +32,7 @@ package org.opensearch.indices; +import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; @@ -63,9 +64,11 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; /** * Global information on indices stats running on a specific node. 
@@ -74,26 +77,27 @@ */ @PublicApi(since = "1.0.0") public class NodeIndicesStats implements Writeable, ToXContentFragment { - private CommonStats stats; - private Map> statsByShard; + protected CommonStats stats; + protected Map statsByIndex; + protected Map> statsByShard; public NodeIndicesStats(StreamInput in) throws IOException { stats = new CommonStats(in); - if (in.readBoolean()) { - int entries = in.readVInt(); - statsByShard = new HashMap<>(); - for (int i = 0; i < entries; i++) { - Index index = new Index(in); - int indexShardListSize = in.readVInt(); - List indexShardStats = new ArrayList<>(indexShardListSize); - for (int j = 0; j < indexShardListSize; j++) { - indexShardStats.add(new IndexShardStats(in)); - } - statsByShard.put(index, indexShardStats); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + // contains statsByIndex + if (in.readBoolean()) { + statsByIndex = readStatsByIndex(in); } } + if (in.readBoolean()) { + statsByShard = readStatsByShard(in); + } } + /** + * Without passing the information of the levels to the constructor, we return the Node-level aggregated stats as + * {@link CommonStats} along with a hash-map containing Index to List of Shard Stats. + */ public NodeIndicesStats(CommonStats oldStats, Map> statsByShard, SearchRequestStats searchRequestStats) { // this.stats = stats; this.statsByShard = statsByShard; @@ -112,6 +116,90 @@ public NodeIndicesStats(CommonStats oldStats, Map> } } + /** + * Passing the level information to the nodes allows us to aggregate the stats based on the level passed. This + * allows us to aggregate based on NodeLevel (default - if no level is passed) or Index level if `indices` level is + * passed and finally return the statsByShards map if `shards` level is passed. This allows us to reduce ser/de of + * stats and return only the information that is required while returning to the client. + */ + public NodeIndicesStats( + CommonStats oldStats, + Map> statsByShard, + SearchRequestStats searchRequestStats, + StatsLevel level + ) { + // make a total common stats from old ones and current ones + this.stats = oldStats; + for (List shardStatsList : statsByShard.values()) { + for (IndexShardStats indexShardStats : shardStatsList) { + for (ShardStats shardStats : indexShardStats.getShards()) { + stats.add(shardStats.getStats()); + } + } + } + + if (this.stats.search != null) { + this.stats.search.setSearchRequestStats(searchRequestStats); + } + + if (level != null) { + switch (level) { + case INDICES: + this.statsByIndex = createStatsByIndex(statsByShard); + break; + case SHARDS: + this.statsByShard = statsByShard; + break; + } + } + } + + /** + * By default, the levels passed from the transport action will be a list of strings, since NodeIndicesStats can + * only aggregate on one level, we pick the first accepted level else we ignore if no known level is passed. Level is + * selected based on enum defined in {@link StatsLevel} + * + * Note - we are picking the first level as multiple levels are not supported in the previous versions. + * @param levels - levels sent in the request. 
+ * + * @return Corresponding identified enum {@link StatsLevel} + */ + public static StatsLevel getAcceptedLevel(String[] levels) { + if (levels != null && levels.length > 0) { + Optional level = Arrays.stream(StatsLevel.values()) + .filter(field -> field.getRestName().equals(levels[0])) + .findFirst(); + return level.orElseThrow(() -> new IllegalArgumentException("Level provided is not supported by NodeIndicesStats")); + } + return null; + } + + private Map readStatsByIndex(StreamInput in) throws IOException { + Map statsByIndex = new HashMap<>(); + int indexEntries = in.readVInt(); + for (int i = 0; i < indexEntries; i++) { + Index index = new Index(in); + CommonStats commonStats = new CommonStats(in); + statsByIndex.put(index, commonStats); + } + return statsByIndex; + } + + private Map> readStatsByShard(StreamInput in) throws IOException { + Map> statsByShard = new HashMap<>(); + int entries = in.readVInt(); + for (int i = 0; i < entries; i++) { + Index index = new Index(in); + int indexShardListSize = in.readVInt(); + List indexShardStats = new ArrayList<>(indexShardListSize); + for (int j = 0; j < indexShardListSize; j++) { + indexShardStats.add(new IndexShardStats(in)); + } + statsByShard.put(index, indexShardStats); + } + return statsByShard; + } + @Nullable public StoreStats getStore() { return stats.getStore(); @@ -195,7 +283,31 @@ public RecoveryStats getRecoveryStats() { @Override public void writeTo(StreamOutput out) throws IOException { stats.writeTo(out); + + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(statsByIndex != null); + if (statsByIndex != null) { + writeStatsByIndex(out); + } + } + out.writeBoolean(statsByShard != null); + if (statsByShard != null) { + writeStatsByShards(out); + } + } + + private void writeStatsByIndex(StreamOutput out) throws IOException { + if (statsByIndex != null) { + out.writeVInt(statsByIndex.size()); + for (Map.Entry entry : statsByIndex.entrySet()) { + entry.getKey().writeTo(out); + entry.getValue().writeTo(out); + } + } + } + + private void writeStatsByShards(StreamOutput out) throws IOException { if (statsByShard != null) { out.writeVInt(statsByShard.size()); for (Map.Entry> entry : statsByShard.entrySet()) { @@ -210,29 +322,46 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - final String level = params.param("level", "node"); - final boolean isLevelValid = "indices".equalsIgnoreCase(level) - || "node".equalsIgnoreCase(level) - || "shards".equalsIgnoreCase(level); + final String level = params.param("level", StatsLevel.NODE.getRestName()); + final boolean isLevelValid = StatsLevel.NODE.getRestName().equalsIgnoreCase(level) + || StatsLevel.INDICES.getRestName().equalsIgnoreCase(level) + || StatsLevel.SHARDS.getRestName().equalsIgnoreCase(level); if (!isLevelValid) { - throw new IllegalArgumentException("level parameter must be one of [indices] or [node] or [shards] but was [" + level + "]"); + throw new IllegalArgumentException( + "level parameter must be one of [" + + StatsLevel.INDICES.getRestName() + + "] or [" + + StatsLevel.NODE.getRestName() + + "] or [" + + StatsLevel.SHARDS.getRestName() + + "] but was [" + + level + + "]" + ); } // "node" level - builder.startObject(Fields.INDICES); + builder.startObject(StatsLevel.INDICES.getRestName()); stats.toXContent(builder, params); - if ("indices".equals(level)) { - Map indexStats = createStatsByIndex(); - builder.startObject(Fields.INDICES); - 
for (Map.Entry entry : indexStats.entrySet()) { + if (StatsLevel.INDICES.getRestName().equals(level)) { + assert statsByIndex != null || statsByShard != null : "Expected shard stats or index stats in response for generating [" + + StatsLevel.INDICES + + "] field"; + if (statsByIndex == null) { + statsByIndex = createStatsByIndex(statsByShard); + } + + builder.startObject(StatsLevel.INDICES.getRestName()); + for (Map.Entry entry : statsByIndex.entrySet()) { builder.startObject(entry.getKey().getName()); entry.getValue().toXContent(builder, params); builder.endObject(); } builder.endObject(); - } else if ("shards".equals(level)) { - builder.startObject("shards"); + } else if (StatsLevel.SHARDS.getRestName().equals(level)) { + builder.startObject(StatsLevel.SHARDS.getRestName()); + assert statsByShard != null : "Expected shard stats in response for generating [" + StatsLevel.SHARDS + "] field"; for (Map.Entry> entry : statsByShard.entrySet()) { builder.startArray(entry.getKey().getName()); for (IndexShardStats indexShardStats : entry.getValue()) { @@ -251,7 +380,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private Map createStatsByIndex() { + private Map createStatsByIndex(Map> statsByShard) { Map statsMap = new HashMap<>(); for (Map.Entry> entry : statsByShard.entrySet()) { if (!statsMap.containsKey(entry.getKey())) { @@ -281,7 +410,21 @@ public List getShardStats(Index index) { * * @opensearch.internal */ - static final class Fields { - static final String INDICES = "indices"; + @PublicApi(since = "3.0.0") + public enum StatsLevel { + INDICES("indices"), + SHARDS("shards"), + NODE("node"); + + private final String restName; + + StatsLevel(String restName) { + this.restName = restName; + } + + public String getRestName() { + return restName; + } + } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java index ed9c0b171aa56..0119731e4a0d7 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -233,6 +233,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String[] levels = Strings.splitStringByCommaToArray(request.param("level")); nodesStatsRequest.indices().setLevels(levels); nodesStatsRequest.setIncludeDiscoveryNodes(false); + nodesStatsRequest.indices().setIncludeIndicesStatsByLevel(true); return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new NodesResponseRestListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 0330fe627ccd0..1aa40b50290cd 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -148,6 +148,7 @@ public void processResponse(final NodesInfoResponse nodesInfoResponse) { NodesStatsRequest.Metric.PROCESS.metricName(), NodesStatsRequest.Metric.SCRIPT.metricName() ); + nodesStatsRequest.indices().setIncludeIndicesStatsByLevel(true); client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception { diff --git 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index f7bc96bdfe769..a0225a0bf6193 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -32,13 +32,19 @@ package org.opensearch.action.admin.cluster.node.stats; +import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.admin.indices.stats.IndexShardStats; +import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.search.SearchRequestStats; import org.opensearch.cluster.coordination.PendingClusterStateStats; import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterManagerThrottlingStats; import org.opensearch.cluster.service.ClusterStateStats; @@ -52,17 +58,31 @@ import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; import org.opensearch.core.indices.breaker.CircuitBreakerStats; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.http.HttpStats; import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationRejectionStats; +import org.opensearch.index.cache.query.QueryCacheStats; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.fielddata.FieldDataStats; +import org.opensearch.index.flush.FlushStats; import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.shard.IndexingStats; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.StoreStats; import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.indices.NodeIndicesStats; import org.opensearch.ingest.IngestStats; @@ -82,17 +102,20 @@ import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; +import org.opensearch.search.suggest.completion.CompletionStats; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPoolStats; import org.opensearch.transport.TransportStats; import 
java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -1065,4 +1088,381 @@ private static RemoteTranslogTransferTracker.Stats getRandomRemoteTranslogTransf private OperationStats getPipelineStats(List pipelineStats, String id) { return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null); } + + public static class MockNodeIndicesStats extends NodeIndicesStats { + + public MockNodeIndicesStats(StreamInput in) throws IOException { + super(in); + } + + public MockNodeIndicesStats( + CommonStats oldStats, + Map> statsByShard, + SearchRequestStats searchRequestStats + ) { + super(oldStats, statsByShard, searchRequestStats); + } + + public MockNodeIndicesStats( + CommonStats oldStats, + Map> statsByShard, + SearchRequestStats searchRequestStats, + StatsLevel level + ) { + super(oldStats, statsByShard, searchRequestStats, level); + } + + public CommonStats getStats() { + return this.stats; + } + + public Map getStatsByIndex() { + return this.statsByIndex; + } + + public Map> getStatsByShard() { + return this.statsByShard; + } + } + + public void testOldVersionNodes() throws IOException { + long numDocs = randomLongBetween(0, 10000); + long numDeletedDocs = randomLongBetween(0, 100); + CommonStats commonStats = new CommonStats(CommonStatsFlags.NONE); + + commonStats.docs = new DocsStats(numDocs, numDeletedDocs, 0); + commonStats.store = new StoreStats(100, 0L); + commonStats.indexing = new IndexingStats(); + DocsStats hostDocStats = new DocsStats(numDocs, numDeletedDocs, 0); + + CommonStatsFlags commonStatsFlags = new CommonStatsFlags(); + commonStatsFlags.clear(); + commonStatsFlags.set(CommonStatsFlags.Flag.Docs, true); + commonStatsFlags.set(CommonStatsFlags.Flag.Store, true); + commonStatsFlags.set(CommonStatsFlags.Flag.Indexing, true); + + Index newIndex = new Index("index", "_na_"); + + MockNodeIndicesStats mockNodeIndicesStats = generateMockNodeIndicesStats(commonStats, newIndex, commonStatsFlags, null); + + // To test out scenario when the incoming node stats response is from a node with an older ES Version. 
+ try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(Version.V_2_13_0); + mockNodeIndicesStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(Version.V_2_13_0); + MockNodeIndicesStats newNodeIndicesStats = new MockNodeIndicesStats(in); + + List incomingIndexStats = newNodeIndicesStats.getStatsByShard().get(newIndex); + incomingIndexStats.forEach(indexShardStats -> { + ShardStats shardStats = Arrays.stream(indexShardStats.getShards()).findFirst().get(); + DocsStats incomingDocStats = shardStats.getStats().docs; + + assertEquals(incomingDocStats.getCount(), hostDocStats.getCount()); + assertEquals(incomingDocStats.getTotalSizeInBytes(), hostDocStats.getTotalSizeInBytes()); + assertEquals(incomingDocStats.getAverageSizeInBytes(), hostDocStats.getAverageSizeInBytes()); + assertEquals(incomingDocStats.getDeleted(), hostDocStats.getDeleted()); + }); + } + } + } + + public void testNodeIndicesStatsSerialization() throws IOException { + long numDocs = randomLongBetween(0, 10000); + long numDeletedDocs = randomLongBetween(0, 100); + List levelParams = new ArrayList<>(); + levelParams.add(NodeIndicesStats.StatsLevel.INDICES); + levelParams.add(NodeIndicesStats.StatsLevel.SHARDS); + levelParams.add(NodeIndicesStats.StatsLevel.NODE); + CommonStats commonStats = new CommonStats(CommonStatsFlags.NONE); + + commonStats.docs = new DocsStats(numDocs, numDeletedDocs, 0); + commonStats.store = new StoreStats(100, 0L); + commonStats.indexing = new IndexingStats(); + + CommonStatsFlags commonStatsFlags = new CommonStatsFlags(); + commonStatsFlags.clear(); + commonStatsFlags.set(CommonStatsFlags.Flag.Docs, true); + commonStatsFlags.set(CommonStatsFlags.Flag.Store, true); + commonStatsFlags.set(CommonStatsFlags.Flag.Indexing, true); + commonStatsFlags.setIncludeIndicesStatsByLevel(true); + + levelParams.forEach(level -> { + Index newIndex = new Index("index", "_na_"); + + MockNodeIndicesStats mockNodeIndicesStats = generateMockNodeIndicesStats(commonStats, newIndex, commonStatsFlags, level); + + // To test out scenario when the incoming node stats response is from a node with an older ES Version. 
+ try (BytesStreamOutput out = new BytesStreamOutput()) { + mockNodeIndicesStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + MockNodeIndicesStats newNodeIndicesStats = new MockNodeIndicesStats(in); + switch (level) { + case NODE: + assertNull(newNodeIndicesStats.getStatsByIndex()); + assertNull(newNodeIndicesStats.getStatsByShard()); + break; + case INDICES: + assertNull(newNodeIndicesStats.getStatsByShard()); + assertNotNull(newNodeIndicesStats.getStatsByIndex()); + break; + case SHARDS: + assertNull(newNodeIndicesStats.getStatsByIndex()); + assertNotNull(newNodeIndicesStats.getStatsByShard()); + break; + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + public void testNodeIndicesStatsToXContent() { + long numDocs = randomLongBetween(0, 10000); + long numDeletedDocs = randomLongBetween(0, 100); + List levelParams = new ArrayList<>(); + levelParams.add(NodeIndicesStats.StatsLevel.INDICES); + levelParams.add(NodeIndicesStats.StatsLevel.SHARDS); + levelParams.add(NodeIndicesStats.StatsLevel.NODE); + CommonStats commonStats = new CommonStats(CommonStatsFlags.NONE); + + commonStats.docs = new DocsStats(numDocs, numDeletedDocs, 0); + commonStats.store = new StoreStats(100, 0L); + commonStats.indexing = new IndexingStats(); + + CommonStatsFlags commonStatsFlags = new CommonStatsFlags(); + commonStatsFlags.clear(); + commonStatsFlags.set(CommonStatsFlags.Flag.Docs, true); + commonStatsFlags.set(CommonStatsFlags.Flag.Store, true); + commonStatsFlags.set(CommonStatsFlags.Flag.Indexing, true); + commonStatsFlags.setIncludeIndicesStatsByLevel(true); + + levelParams.forEach(level -> { + + Index newIndex = new Index("index", "_na_"); + + MockNodeIndicesStats mockNodeIndicesStats = generateMockNodeIndicesStats(commonStats, newIndex, commonStatsFlags, level); + + XContentBuilder builder = null; + try { + builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder = mockNodeIndicesStats.toXContent( + builder, + new ToXContent.MapParams(Collections.singletonMap("level", level.getRestName())) + ); + builder.endObject(); + + Map xContentMap = xContentBuilderToMap(builder); + LinkedHashMap indicesStatsMap = (LinkedHashMap) xContentMap.get(NodeIndicesStats.StatsLevel.INDICES.getRestName()); + + switch (level) { + case NODE: + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.INDICES.getRestName())); + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.SHARDS.getRestName())); + break; + case INDICES: + assertTrue(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.INDICES.getRestName())); + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.SHARDS.getRestName())); + break; + case SHARDS: + assertFalse(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.INDICES.getRestName())); + assertTrue(indicesStatsMap.containsKey(NodeIndicesStats.StatsLevel.SHARDS.getRestName())); + break; + } + } catch (IOException e) { + throw new RuntimeException(e); + } + + }); + } + + public void testNodeIndicesStatsWithAndWithoutAggregations() throws IOException { + + CommonStatsFlags commonStatsFlags = new CommonStatsFlags( + CommonStatsFlags.Flag.Docs, + CommonStatsFlags.Flag.Store, + CommonStatsFlags.Flag.Indexing, + CommonStatsFlags.Flag.Completion, + CommonStatsFlags.Flag.Flush, + CommonStatsFlags.Flag.FieldData, + CommonStatsFlags.Flag.QueryCache, + CommonStatsFlags.Flag.Segments + ); + + int numberOfIndexes = randomIntBetween(1, 3); + List indexList = new ArrayList<>(); + for (int i = 0; i < 
numberOfIndexes; i++) { + Index index = new Index("test-index-" + i, "_na_"); + indexList.add(index); + } + + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + HashMap> statsByShards = createRandomShardByStats(indexList); + + final MockNodeIndicesStats nonAggregatedNodeIndicesStats = new MockNodeIndicesStats( + new CommonStats(commonStatsFlags), + statsByShards, + new SearchRequestStats(clusterSettings) + ); + + commonStatsFlags.setIncludeIndicesStatsByLevel(true); + + Arrays.stream(NodeIndicesStats.StatsLevel.values()).forEach(level -> { + MockNodeIndicesStats aggregatedNodeIndicesStats = new MockNodeIndicesStats( + new CommonStats(commonStatsFlags), + statsByShards, + new SearchRequestStats(clusterSettings), + level + ); + + XContentBuilder nonAggregatedBuilder = null; + XContentBuilder aggregatedBuilder = null; + try { + nonAggregatedBuilder = XContentFactory.jsonBuilder(); + nonAggregatedBuilder.startObject(); + nonAggregatedBuilder = nonAggregatedNodeIndicesStats.toXContent( + nonAggregatedBuilder, + new ToXContent.MapParams(Collections.singletonMap("level", level.getRestName())) + ); + nonAggregatedBuilder.endObject(); + Map nonAggregatedContentMap = xContentBuilderToMap(nonAggregatedBuilder); + + aggregatedBuilder = XContentFactory.jsonBuilder(); + aggregatedBuilder.startObject(); + aggregatedBuilder = aggregatedNodeIndicesStats.toXContent( + aggregatedBuilder, + new ToXContent.MapParams(Collections.singletonMap("level", level.getRestName())) + ); + aggregatedBuilder.endObject(); + Map aggregatedContentMap = xContentBuilderToMap(aggregatedBuilder); + + assertEquals(aggregatedContentMap, nonAggregatedContentMap); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + private CommonStats createRandomCommonStats() { + CommonStats commonStats = new CommonStats(CommonStatsFlags.NONE); + commonStats.docs = new DocsStats(randomLongBetween(0, 10000), randomLongBetween(0, 100), randomLongBetween(0, 1000)); + commonStats.store = new StoreStats(randomLongBetween(0, 100), randomLongBetween(0, 1000)); + commonStats.indexing = new IndexingStats(); + commonStats.completion = new CompletionStats(); + commonStats.flush = new FlushStats(randomLongBetween(0, 100), randomLongBetween(0, 100), randomLongBetween(0, 100)); + commonStats.fieldData = new FieldDataStats(randomLongBetween(0, 100), randomLongBetween(0, 100), null); + commonStats.queryCache = new QueryCacheStats( + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100) + ); + commonStats.segments = new SegmentsStats(); + + return commonStats; + } + + private HashMap> createRandomShardByStats(List indexes) { + DiscoveryNode localNode = new DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT); + HashMap> statsByShards = new HashMap<>(); + indexes.forEach(index -> { + List indexShardStatsList = new ArrayList<>(); + + int numberOfShards = randomIntBetween(1, 4); + for (int i = 0; i < numberOfShards; i++) { + ShardRoutingState shardRoutingState = ShardRoutingState.fromValue((byte) randomIntBetween(2, 3)); + + ShardRouting shardRouting = TestShardRouting.newShardRouting( + index.getName(), + i, + localNode.getId(), + randomBoolean(), + shardRoutingState + ); + + Path path = createTempDir().resolve("indices") + .resolve(shardRouting.shardId().getIndex().getUUID()) + .resolve(String.valueOf(shardRouting.shardId().id())); + + ShardStats shardStats = new 
ShardStats( + shardRouting, + new ShardPath(false, path, path, shardRouting.shardId()), + createRandomCommonStats(), + null, + null, + null + ); + List shardStatsList = new ArrayList<>(); + shardStatsList.add(shardStats); + IndexShardStats indexShardStats = new IndexShardStats(shardRouting.shardId(), shardStatsList.toArray(new ShardStats[0])); + indexShardStatsList.add(indexShardStats); + } + statsByShards.put(index, indexShardStatsList); + }); + + return statsByShards; + } + + private Map xContentBuilderToMap(XContentBuilder xContentBuilder) { + return XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(); + } + + public MockNodeIndicesStats generateMockNodeIndicesStats( + CommonStats commonStats, + Index index, + CommonStatsFlags commonStatsFlags, + NodeIndicesStats.StatsLevel level + ) { + DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + Map> statsByShard = new HashMap<>(); + List indexShardStatsList = new ArrayList<>(); + Index statsIndex = null; + for (int i = 0; i < 2; i++) { + ShardRoutingState shardRoutingState = ShardRoutingState.fromValue((byte) randomIntBetween(2, 3)); + ShardRouting shardRouting = TestShardRouting.newShardRouting( + index.getName(), + i, + localNode.getId(), + randomBoolean(), + shardRoutingState + ); + + if (statsIndex == null) { + statsIndex = shardRouting.shardId().getIndex(); + } + + Path path = createTempDir().resolve("indices") + .resolve(shardRouting.shardId().getIndex().getUUID()) + .resolve(String.valueOf(shardRouting.shardId().id())); + + ShardStats shardStats = new ShardStats( + shardRouting, + new ShardPath(false, path, path, shardRouting.shardId()), + commonStats, + null, + null, + null + ); + IndexShardStats indexShardStats = new IndexShardStats(shardRouting.shardId(), new ShardStats[] { shardStats }); + indexShardStatsList.add(indexShardStats); + } + + statsByShard.put(statsIndex, indexShardStatsList); + + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + if (commonStatsFlags.getIncludeIndicesStatsByLevel()) { + return new MockNodeIndicesStats( + new CommonStats(commonStatsFlags), + statsByShard, + new SearchRequestStats(clusterSettings), + level + ); + } else { + return new MockNodeIndicesStats(new CommonStats(commonStatsFlags), statsByShard, new SearchRequestStats(clusterSettings)); + } + } }
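
The new behavior is driven through CommonStatsFlags: setLevels(...) carries the requested level to each data node, and setIncludeIndicesStatsByLevel(true) tells the node to pre-aggregate its NodeIndicesStats at that level instead of always shipping per-shard stats for the coordinator to roll up. A minimal sketch of the transport-side usage, modeled on the integration test above and assumed to run inside an OpenSearchIntegTestCase subclass (so the client() helper and an existing index are available):

import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
import org.opensearch.indices.NodeIndicesStats;

// Ask each data node to aggregate its indices stats at the "indices" level before responding.
CommonStatsFlags flags = new CommonStatsFlags();
flags.setLevels(new String[] { NodeIndicesStats.StatsLevel.INDICES.getRestName() });
flags.setIncludeIndicesStatsByLevel(true);

NodesStatsResponse response = client().admin()
    .cluster()
    .prepareNodesStats()
    .setIndices(flags)
    .get();

// With the flag set, each NodeIndicesStats carries the node-level totals plus a per-index
// map only; the per-shard map is omitted unless the "shards" level was requested.
NodeIndicesStats indicesStats = response.getNodes().get(0).getIndices();

The REST layer opts in unconditionally: RestNodesStatsAction forwards the level request parameter via setLevels and calls setIncludeIndicesStatsByLevel(true), and the _cat/nodes handler sets the same flag, so existing GET _nodes/stats?level=indices|shards calls pick up the optimization without any client change.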
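
Levels arrive as a string array, but NodeIndicesStats can aggregate on only one of them, so getAcceptedLevel(...) considers just the first entry, maps it onto the StatsLevel enum, and rejects anything it does not recognize. A short illustration of that contract as written in this patch:

import org.opensearch.indices.NodeIndicesStats;
import org.opensearch.indices.NodeIndicesStats.StatsLevel;

StatsLevel shards = NodeIndicesStats.getAcceptedLevel(new String[] { "shards" }); // StatsLevel.SHARDS
StatsLevel none = NodeIndicesStats.getAcceptedLevel(null);                        // null or empty -> node-level aggregation only

// Unknown names fail fast rather than being ignored; over the transport layer this surfaces
// as the nodes-stats failure asserted in testNodeIndicesStatsUnknownLevelThrowsException:
// NodeIndicesStats.getAcceptedLevel(new String[] { "unknown" });
// -> IllegalArgumentException("Level provided is not supported by NodeIndicesStats")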
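
Rendering honours the level param independently of what the data node pre-computed, so the two must agree: with the flag enabled the node retains only the map it was asked for, and requesting a finer level from toXContent than was shipped trips the new assertions (the AssertionError paths exercised in testNodeIndicesStatsXContentWithAggregationOnNodes). A sketch of rendering at shard level, mirroring the tests; it assumes a NodeStats named nodeStats obtained as in the first sketch and a surrounding method that declares IOException:

import java.util.Collections;
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.indices.NodeIndicesStats;

XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
// Valid when the stats were aggregated with StatsLevel.SHARDS, or when the flag was left off
// (per-shard stats are then always present); otherwise the assert on statsByShard fires.
nodeStats.getIndices()
    .toXContent(builder, new ToXContent.MapParams(
        Collections.singletonMap("level", NodeIndicesStats.StatsLevel.SHARDS.getRestName())));
builder.endObject();

The resulting shape follows the level: node level emits only the aggregated totals under the top-level "indices" object, indices level nests an additional "indices" object keyed by index name, and shards level nests a "shards" object that maps each index name to an array of per-shard stats.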
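
On the wire the per-index map is written behind a Version.V_3_0_0 gate while the per-shard map keeps its original slot, so mixed-version clusters keep working and an older peer simply never sees statsByIndex. A sketch of the round-trip that testOldVersionNodes performs, assuming a populated NodeIndicesStats named nodeIndicesStats and the stream classes already imported by NodeStatsTests (BytesStreamOutput, StreamInput):

try (BytesStreamOutput out = new BytesStreamOutput()) {
    out.setVersion(Version.V_2_13_0);   // pretend the receiver is a 2.13 node
    nodeIndicesStats.writeTo(out);      // statsByIndex is skipped; any statsByShard is written in its pre-3.0 position
    try (StreamInput in = out.bytes().streamInput()) {
        in.setVersion(Version.V_2_13_0);
        NodeIndicesStats roundTripped = new NodeIndicesStats(in);  // old wire format still readable
    }
}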