diff --git a/docs/changelog/112595.yaml b/docs/changelog/112595.yaml
new file mode 100644
index 0000000000000..19ee0368475ae
--- /dev/null
+++ b/docs/changelog/112595.yaml
@@ -0,0 +1,6 @@
+pr: 112595
+summary: Collect and display execution metadata for ES|QL cross cluster searches
+area: ES|QL
+type: enhancement
+issues:
+ - 112402
diff --git a/docs/reference/esql/esql-across-clusters.asciidoc b/docs/reference/esql/esql-across-clusters.asciidoc
index d13b3db1c73ea..cfcb5de73602c 100644
--- a/docs/reference/esql/esql-across-clusters.asciidoc
+++ b/docs/reference/esql/esql-across-clusters.asciidoc
@@ -85,7 +85,7 @@ POST /_security/role/remote1
       "privileges": [ "read","read_cross_cluster" ], <4>
       "clusters" : ["my_remote_cluster"] <5>
     }
-  ],
+  ],
   "remote_cluster": [ <6>
     {
       "privileges": [
@@ -100,15 +100,23 @@ POST /_security/role/remote1
 ----
 
 <1> The `cross_cluster_search` cluster privilege is required for the _local_ cluster.
-<2> Typically, users will have permissions to read both local and remote indices. However, for cases where the role is intended to ONLY search the remote cluster, the `read` permission is still required for the local cluster. To provide read access to the local cluster, but disallow reading any indices in the local cluster, the `names` field may be an empty string.
-<3> The indices allowed read access to the remote cluster. The configured <> must also allow this index to be read.
-<4> The `read_cross_cluster` privilege is always required when using {esql} across clusters with the API key based security model.
+<2> Typically, users will have permissions to read both local and remote indices. However, for cases where the role
+is intended to ONLY search the remote cluster, the `read` permission is still required for the local cluster.
+To provide read access to the local cluster, but disallow reading any indices in the local cluster, the `names`
+field may be an empty string.
+<3> The indices allowed read access to the remote cluster. The configured
+<> must also allow this index to be read.
+<4> The `read_cross_cluster` privilege is always required when using {esql} across clusters with the API key based
+security model.
 <5> The remote clusters to which these privileges apply.
-This remote cluster must be configured with a <> and connected to the remote cluster before the remote index can be queried.
+This remote cluster must be configured with a <>
+and connected to the remote cluster before the remote index can be queried.
 Verify connection using the <> API.
-<6> Required to allow remote enrichment. Without this, the user cannot read from the `.enrich` indices on the remote cluster. The `remote_cluster` security privilege was introduced in version *8.15.0*.
+<6> Required to allow remote enrichment. Without this, the user cannot read from the `.enrich` indices on the
+remote cluster. The `remote_cluster` security privilege was introduced in version *8.15.0*.
 
-You will then need a user or API key with the permissions you created above. The following example API call creates a user with the `remote1` role.
+You will then need a user or API key with the permissions you created above. The following example API call creates
+a user with the `remote1` role.
 
 [source,console]
 ----
@@ -119,11 +127,13 @@ POST /_security/user/remote_user
 }
 ----
 
-Remember that all cross-cluster requests from the local cluster are bound by the cross cluster API key’s privileges, which are controlled by the remote cluster's administrator.
+Remember that all cross-cluster requests from the local cluster are bound by the cross cluster API key’s privileges,
+which are controlled by the remote cluster's administrator.
 
 [TIP]
 ====
-Cross cluster API keys created in versions prior to 8.15.0 will need to replaced or updated to add the new permissions required for {esql} with ENRICH.
+Cross cluster API keys created in versions prior to 8.15.0 will need to be replaced or updated to add the new permissions
+required for {esql} with ENRICH.
 ====
 
 [discrete]
@@ -174,6 +184,189 @@ FROM *:my-index-000001
 | LIMIT 10
 ----
 
+[discrete]
+[[ccq-cluster-details]]
+==== Cross-cluster metadata
+
+ES|QL {ccs} responses include metadata about the search on each cluster when the response format is JSON.
+Here we show an example using the async search endpoint. {ccs-cap} metadata is also present in the synchronous
+search endpoint.
+
+[source,console]
+----
+POST /_query/async?format=json
+{
+  "query": """
+    FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*
+    | STATS COUNT(http.response.status_code) BY user.id
+    | LIMIT 2
+  """
+}
+----
+// TEST[setup:my_index]
+// TEST[s/cluster_one:my-index-000001,cluster_two:my-index//]
+
+Which returns:
+
+[source,console-result]
+----
+{
+  "is_running": false,
+  "took": 42, <1>
+  "columns" : [
+    {
+      "name" : "COUNT(http.response.status_code)",
+      "type" : "long"
+    },
+    {
+      "name" : "user.id",
+      "type" : "keyword"
+    }
+  ],
+  "values" : [
+    [4, "elkbee"],
+    [1, "kimchy"]
+  ],
+  "_clusters": { <2>
+    "total": 3,
+    "successful": 3,
+    "running": 0,
+    "skipped": 0,
+    "partial": 0,
+    "failed": 0,
+    "details": { <3>
+      "(local)": { <4>
+        "status": "successful",
+        "indices": "my-index-000001",
+        "took": 36, <5>
+        "_shards": { <6>
+          "total": 13,
+          "successful": 13,
+          "skipped": 0,
+          "failed": 0
+        }
+      },
+      "cluster_one": {
+        "status": "successful",
+        "indices": "cluster_one:my-index-000001",
+        "took": 38,
+        "_shards": {
+          "total": 4,
+          "successful": 4,
+          "skipped": 0,
+          "failed": 0
+        }
+      },
+      "cluster_two": {
+        "status": "successful",
+        "indices": "cluster_two:my-index*",
+        "took": 41,
+        "_shards": {
+          "total": 18,
+          "successful": 18,
+          "skipped": 1,
+          "failed": 0
+        }
+      }
+    }
+  }
+}
+----
+// TEST[skip: cross-cluster testing env not set up]
+
+<1> How long the entire search (across all clusters) took, in milliseconds.
+<2> This section of counters shows all possible cluster search states and how many cluster
+searches are currently in that state. The clusters can have one of the following statuses: *running*,
+*successful* (searches on all shards were successful), *skipped* (the search
+failed on a cluster marked with `skip_unavailable`=`true`) or *failed* (the search
+failed on a cluster marked with `skip_unavailable`=`false`).
+<3> The `_clusters/details` section shows metadata about the search on each cluster.
+<4> If you included indices from the local cluster you sent the request to in your {ccs},
+it is identified as "(local)".
+<5> How long (in milliseconds) the search took on each cluster. This can be useful to determine
+which clusters have slower response times than others.
+<6> The shard details for the search on that cluster, including a count of shards that were
+skipped due to the can-match phase. Shards are skipped when they cannot have any matching data
+and therefore are not included in the full ES|QL query.
+
+
+The cross-cluster metadata can be used to determine whether any data came back from a cluster.
+For instance, in the query below, the wildcard expression for `cluster_two` did not resolve
+to a concrete index (or indices). The cluster is, therefore, marked as 'skipped' and the total
+number of shards searched is set to zero.
+Since the other cluster did have a matching index, the search did not return an error, but
+instead returned all the matching data it could find.
+
+
+[source,console]
+----
+POST /_query/async?format=json
+{
+  "query": """
+    FROM cluster_one:my-index*,cluster_two:logs*
+    | STATS COUNT(http.response.status_code) BY user.id
+    | LIMIT 2
+  """
+}
+----
+// TEST[continued]
+// TEST[s/cluster_one:my-index\*,cluster_two:logs\*/my-index-000001/]
+
+Which returns:
+
+[source,console-result]
+----
+{
+  "is_running": false,
+  "took": 55,
+  "columns": [
+    ... // not shown
+  ],
+  "values": [
+    ... // not shown
+  ],
+  "_clusters": {
+    "total": 2,
+    "successful": 2,
+    "running": 0,
+    "skipped": 0,
+    "partial": 0,
+    "failed": 0,
+    "details": {
+      "cluster_one": {
+        "status": "successful",
+        "indices": "cluster_one:my-index*",
+        "took": 38,
+        "_shards": {
+          "total": 4,
+          "successful": 4,
+          "skipped": 0,
+          "failed": 0
+        }
+      },
+      "cluster_two": {
+        "status": "skipped", <1>
+        "indices": "cluster_two:logs*",
+        "took": 0,
+        "_shards": {
+          "total": 0, <2>
+          "successful": 0,
+          "skipped": 0,
+          "failed": 0
+        }
+      }
+    }
+  }
+}
+----
+// TEST[skip: cross-cluster testing env not set up]
+
+<1> This cluster is marked as 'skipped', since there were no matching indices on that cluster.
+<2> Indicates that no shards were searched (due to not having any matching indices).
+
+
+
+
 [discrete]
 [[ccq-enrich]]
 ==== Enrich across clusters
@@ -331,8 +524,7 @@ setting. As a result, if a remote cluster specified in the request is
 unavailable or failed, {ccs} for {esql} queries will fail regardless of the setting.
 
 We are actively working to align the behavior of {ccs} for {esql} with other
-{ccs} APIs. This includes providing detailed execution information for each cluster
-in the response, such as execution time, selected target indices, and shards.
+{ccs} APIs.
 
 [discrete]
 [[ccq-during-upgrade]]
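For client code that consumes these responses, the per-cluster `status` values above are all that is needed to detect reduced search coverage. A minimal client-side sketch (a hypothetical helper, not part of this change) that walks the `_clusters` section of a parsed JSON response and collects the aliases of skipped clusters:

[source,java]
----
// Hypothetical client-side helper; assumes the JSON response shown above has
// already been parsed into nested java.util.Map/List structures.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public final class EsqlClusterMetadata {
    @SuppressWarnings("unchecked")
    public static List<String> skippedClusters(Map<String, Object> response) {
        Map<String, Object> clusters = (Map<String, Object>) response.get("_clusters");
        if (clusters == null) {
            return List.of(); // not a cross-cluster response
        }
        Map<String, Object> details = (Map<String, Object>) clusters.get("details");
        List<String> skipped = new ArrayList<>();
        for (Map.Entry<String, Object> entry : details.entrySet()) {
            Map<String, Object> cluster = (Map<String, Object>) entry.getValue();
            if ("skipped".equals(cluster.get("status"))) {
                skipped.add(entry.getKey()); // e.g. "cluster_two" in the example above
            }
        }
        return skipped;
    }
}
----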
diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc
index b2493dc32a211..1cd808b2aef44 100644
--- a/docs/reference/esql/esql-rest.asciidoc
+++ b/docs/reference/esql/esql-rest.asciidoc
@@ -192,6 +192,7 @@ Which returns:
 [source,console-result]
 ----
 {
+  "took": 28,
   "columns": [
     {"name": "author", "type": "text"},
     {"name": "name", "type": "text"},
@@ -206,6 +207,7 @@ Which returns:
   ]
 }
 ----
+// TESTRESPONSE[s/"took": 28/"took": "$body.took"/]
 
 [discrete]
 [[esql-locale-param]]
@@ -385,12 +387,13 @@ GET /_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUT
 // TEST[skip: no access to query ID - may return response values]
 
 If the response's `is_running` value is `false`, the query has finished
-and the results are returned.
+and the results are returned, along with the `took` time for the query.
 
 [source,console-result]
 ----
 {
   "is_running": false,
+  "took": 48,
   "columns": ...
 }
 ----
diff --git a/docs/reference/esql/multivalued-fields.asciidoc b/docs/reference/esql/multivalued-fields.asciidoc
index 8ff645bba863e..2dfda3060d3ea 100644
--- a/docs/reference/esql/multivalued-fields.asciidoc
+++ b/docs/reference/esql/multivalued-fields.asciidoc
@@ -26,6 +26,7 @@ Multivalued fields come back as a JSON array:
 [source,console-result]
 ----
 {
+  "took": 28,
   "columns": [
     { "name": "a", "type": "long"},
     { "name": "b", "type": "long"}
@@ -36,6 +37,8 @@ Multivalued fields come back as a JSON array:
   ]
 }
 ----
+// TESTRESPONSE[s/"took": 28/"took": "$body.took"/]
+
 
 The relative order of values in a multivalued field is undefined. They'll
 frequently be in ascending order but don't rely on that.
@@ -74,6 +77,7 @@ And {esql} sees that removal:
 [source,console-result]
 ----
 {
+  "took": 28,
   "columns": [
     { "name": "a", "type": "long"},
     { "name": "b", "type": "keyword"}
@@ -84,6 +88,8 @@ And {esql} sees that removal:
   ]
 }
 ----
+// TESTRESPONSE[s/"took": 28/"took": "$body.took"/]
+
 
 But other types, like `long` don't remove duplicates.
@@ -115,6 +121,7 @@ And {esql} also sees that:
 [source,console-result]
 ----
 {
+  "took": 28,
   "columns": [
     { "name": "a", "type": "long"},
     { "name": "b", "type": "long"}
@@ -125,6 +132,8 @@ And {esql} also sees that:
   ]
 }
 ----
+// TESTRESPONSE[s/"took": 28/"took": "$body.took"/]
+
 
 This is all at the storage layer. If you store duplicate `long`s and then
 convert them to strings the duplicates will stay:
@@ -155,6 +164,7 @@ POST /_query
 [source,console-result]
 ----
 {
+  "took": 28,
   "columns": [
     { "name": "a", "type": "long"},
     { "name": "b", "type": "keyword"}
@@ -165,6 +175,7 @@ POST /_query
   ]
 }
 ----
+// TESTRESPONSE[s/"took": 28/"took": "$body.took"/]
 
 [discrete]
 [[esql-multivalued-fields-functions]]
@@ -198,6 +209,7 @@ POST /_query
 [source,console-result]
 ----
 {
+  "took": 28,
   "columns": [
     { "name": "a", "type": "long"},
     { "name": "b", "type": "long"},
@@ -210,6 +222,7 @@ POST /_query
   ]
 }
 ----
+// TESTRESPONSE[s/"took": 28/"took": "$body.took"/]
 
 Work around this limitation by converting the field to single value
 with one of:
@@ -233,6 +246,7 @@ POST /_query
 [source,console-result]
 ----
 {
+  "took": 28,
   "columns": [
     { "name": "a", "type": "long"},
     { "name": "b", "type": "long"},
@@ -245,4 +259,4 @@ POST /_query
   ]
 }
 ----
-
+// TESTRESPONSE[s/"took": 28/"took": "$body.took"/]
diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
index 9fac2b7dde647..ec04b63a575db 100644
--- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
+++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
@@ -19,6 +19,9 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.NoSeedNodeLeftException;
+import org.elasticsearch.transport.NoSuchRemoteClusterException;
 import org.elasticsearch.xcontent.XContentParseException;
 
 import java.io.IOException;
@@ -472,7 +475,7 @@ public static ShardOperationFailedException[] groupBy(ShardOperationFailedExcept
     }
 
     /**
-     * Utility method useful for determine whether to log an Exception or perhaps
+     * Utility method useful for determining whether to log an Exception or perhaps
      * avoid logging a stacktrace if the caller/logger is not interested in these
     * types of node/shard issues.
     *
@@ -490,6 +493,27 @@ public static boolean isNodeOrShardUnavailableTypeException(Throwable t) {
             || t instanceof org.elasticsearch.cluster.block.ClusterBlockException);
     }
 
+    /**
+     * Checks the exception against a known list of exceptions that indicate a remote cluster
+     * cannot be connected to.
+     *
+     * @param e Exception to inspect
+     * @return true if the Exception is known to indicate that a remote cluster
+     *         is unavailable (cannot be connected to by the transport layer)
+     */
+    public static boolean isRemoteUnavailableException(Exception e) {
+        Throwable unwrap = unwrap(e, ConnectTransportException.class, NoSuchRemoteClusterException.class, NoSeedNodeLeftException.class);
+        if (unwrap != null) {
+            return true;
+        }
+        Throwable ill = unwrap(e, IllegalStateException.class, IllegalArgumentException.class);
+        if (ill != null && (ill.getMessage().contains("Unable to open any connections") || ill.getMessage().contains("unknown host"))) {
+            return true;
+        }
+        // doesn't look like any of the known remote exceptions
+        return false;
+    }
+
     private static class GroupBy {
         final String reason;
         final String index;
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 2039376bcb31d..5487cd11d03f9 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -229,6 +229,7 @@ static TransportVersion def(int id) {
     public static final TransportVersion RETAIN_ILM_STEP_INFO = def(8_753_00_0);
     public static final TransportVersion ADD_DATA_STREAM_OPTIONS = def(8_754_00_0);
     public static final TransportVersion CCS_REMOTE_TELEMETRY_STATS = def(8_755_00_0);
+    public static final TransportVersion ESQL_CCS_EXECUTION_INFO = def(8_756_00_0);
 
     /*
      * STOP! READ THIS FIRST! No, really,
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java
index 9b9d0ff89a4f4..9e58d6d8febef 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java
@@ -21,9 +21,6 @@
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.query.SearchTimeoutException;
 import org.elasticsearch.tasks.TaskCancelledException;
-import org.elasticsearch.transport.ConnectTransportException;
-import org.elasticsearch.transport.NoSeedNodeLeftException;
-import org.elasticsearch.transport.NoSuchRemoteClusterException;
 
 import java.util.Arrays;
 import java.util.HashMap;
@@ -118,7 +115,7 @@ public static Result getFailureType(Exception e) {
         if (unwrapped instanceof Exception) {
             e = (Exception) unwrapped;
         }
-        if (isRemoteUnavailable(e)) {
+        if (ExceptionsHelper.isRemoteUnavailableException(e)) {
             return Result.REMOTES_UNAVAILABLE;
         }
         if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) {
@@ -149,27 +146,6 @@ public static Result getFailureType(Exception e) {
         return Result.UNKNOWN;
     }
 
-    /**
-     * Is this failure exception because remote was unavailable?
-     * See also: TransportResolveClusterAction#notConnectedError
-     */
-    static boolean isRemoteUnavailable(Exception e) {
-        if (ExceptionsHelper.unwrap(
-            e,
-            ConnectTransportException.class,
-            NoSuchRemoteClusterException.class,
-            NoSeedNodeLeftException.class
-        ) != null) {
-            return true;
-        }
-        Throwable ill = ExceptionsHelper.unwrap(e, IllegalStateException.class, IllegalArgumentException.class);
-        if (ill != null && (ill.getMessage().contains("Unable to open any connections") || ill.getMessage().contains("unknown host"))) {
-            return true;
-        }
-        // Ok doesn't look like any of the known remote exceptions
-        return false;
-    }
-
     /**
      * Is this failure coming from a remote cluster?
     */
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java
index 8547b5ea7d756..c30a2a44274a7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java
@@ -35,9 +35,6 @@
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.ConnectTransportException;
-import org.elasticsearch.transport.NoSeedNodeLeftException;
-import org.elasticsearch.transport.NoSuchRemoteClusterException;
 import org.elasticsearch.transport.RemoteClusterAware;
 import org.elasticsearch.transport.RemoteClusterService;
 import org.elasticsearch.transport.TransportService;
@@ -172,7 +169,7 @@ public void onFailure(Exception failure) {
                     releaseResourcesOnCancel(clusterInfoMap);
                     return;
                 }
-                if (notConnectedError(failure)) {
+                if (ExceptionsHelper.isRemoteUnavailableException(failure)) {
                     clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(false, skipUnavailable));
                 } else if (ExceptionsHelper.unwrap(
                     failure,
@@ -246,27 +243,6 @@ public void onFailure(Exception e) {
         }
     }
 
-    /**
-     * Checks the exception against a known list of exceptions that indicate a remote cluster
-     * cannot be connected to.
-     */
-    private boolean notConnectedError(Exception e) {
-        if (e instanceof ConnectTransportException || e instanceof NoSuchRemoteClusterException) {
-            return true;
-        }
-        if (e instanceof IllegalStateException && e.getMessage().contains("Unable to open any connections")) {
-            return true;
-        }
-        Throwable ill = ExceptionsHelper.unwrap(e, IllegalArgumentException.class);
-        if (ill != null && ill.getMessage().contains("unknown host")) {
-            return true;
-        }
-        if (ExceptionsHelper.unwrap(e, NoSeedNodeLeftException.class) != null) {
-            return true;
-        }
-        return false;
-    }
-
     /**
      * Checks whether the local cluster has any matching indices (non-closed), aliases or data streams for
     * the index expression captured in localIndices.
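Both call sites above now delegate to `ExceptionsHelper.isRemoteUnavailableException`. A small sketch of the expected behavior of the consolidated helper (illustrative assertions, assuming the exception classes shown in the diff; not a test from this change):

[source,java]
----
// Illustrative only: expected behavior of the consolidated helper.
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.transport.NoSeedNodeLeftException;

public class RemoteUnavailableSketch {
    public static void main(String[] args) {
        // Matches a known connectivity exception, even when wrapped, because the helper unwraps causes.
        Exception wrapped = new RuntimeException(new NoSeedNodeLeftException("my_remote"));
        assert ExceptionsHelper.isRemoteUnavailableException(wrapped);

        // IllegalState/IllegalArgument exceptions match only on the two known connectivity messages.
        assert ExceptionsHelper.isRemoteUnavailableException(new IllegalStateException("Unable to open any connections"));
        assert ExceptionsHelper.isRemoteUnavailableException(new IllegalArgumentException("bad parameter")) == false;
    }
}
----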
diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java
index 2bdb821eda11a..8755139ad84b7 100644
--- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java
+++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java
@@ -63,6 +63,16 @@ public static Iterator<ToXContent> xContentValuesMap(String name, Map<String, ? extends ToXContent> map) {
 
+    /**
+     * Like xContentValuesMap, but each map value writes its own field name (for example via startObject(name)),
+     * rather than having the name taken from the map key.
+     */
+    public static Iterator<ToXContent> xContentFragmentValuesMapCreateOwnName(String name, Map<String, ? extends ToXContent> map) {
+        return map(name, map, entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder, params));
+    }
+
     public static Iterator<ToXContent> field(String name, boolean value) {
         return Iterators.single(((builder, params) -> builder.field(name, value)));
     }
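A rough sketch of the call pattern the new helper enables (illustrative names; assumes the `map(...)` helper wraps the entries in a `startObject(name)`/`endObject()` pair, as its sibling `xContentValuesMap` does): each map value renders itself, including its own field name, which is what per-cluster objects such as the `_clusters.details` section need.

[source,java]
----
// Illustrative only: a value type that writes its own field name.
import java.util.Iterator;
import java.util.Map;

import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
import org.elasticsearch.xcontent.ToXContent;

class DetailsChunksSketch {
    // Emits, roughly: "details": { "cluster_one": { "status": "successful" } }
    static Iterator<ToXContent> detailsChunks() {
        Map<String, ToXContent> details = Map.of(
            "cluster_one",
            (builder, params) -> builder.startObject("cluster_one").field("status", "successful").endObject()
        );
        return ChunkedToXContentHelper.xContentFragmentValuesMapCreateOwnName("details", details);
    }
}
----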
diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesExpressionGrouper.java b/server/src/main/java/org/elasticsearch/indices/IndicesExpressionGrouper.java
new file mode 100644
index 0000000000000..096ac0912d531
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesExpressionGrouper.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.indices;
+
+import org.elasticsearch.action.OriginalIndices;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Strings;
+
+import java.util.Map;
+
+/**
+ * Interface for grouping index expressions, along with IndicesOptions, by cluster alias.
+ * Implementations should support the following:
+ * - plain index names
+ * - cluster:index notation
+ * - date math expressions, including date math prefixed by a clusterAlias
+ * - wildcards
+ * - multiple index expressions (e.g., logs1,logs2,cluster-a:logs*)
+ *
+ * Note: these methods do not resolve index expressions to concrete indices.
+ */
+public interface IndicesExpressionGrouper {
+
+    /**
+     * @param indicesOptions IndicesOptions to clarify how the index expression should be parsed/applied
+     * @param indexExpressionCsv Multiple index expressions as a CSV string (with no spaces), e.g., "logs1,logs2,cluster-a:logs1".
+     *                           A single index expression is also supported.
+     * @return Map where the key is the cluster alias (for the "local" cluster, it is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)
+     *         and the value for that cluster from the index expression is an OriginalIndices object.
+     */
+    default Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String indexExpressionCsv) {
+        return groupIndices(indicesOptions, Strings.splitStringByCommaToArray(indexExpressionCsv));
+    }
+
+    /**
+     * Same behavior as the other groupIndices, except the incoming multiple index expressions must already be
+     * parsed into a String array.
+     * @param indicesOptions IndicesOptions to clarify how the index expressions should be parsed/applied
+     * @param indexExpressions Multiple index expressions as a String[].
+     * @return Map where the key is the cluster alias (for the "local" cluster, it is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)
+     *         and the value for that cluster from the index expression is an OriginalIndices object.
+     */
+    Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indexExpressions);
+}
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java
index 0d6b2cf45138b..ccb00181798db 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java
@@ -68,6 +68,20 @@ public static boolean isRemoteIndexName(String indexExpression) {
         return indexExpression.indexOf(RemoteClusterService.REMOTE_CLUSTER_INDEX_SEPARATOR) > 0;
     }
 
+    /**
+     * @param indexExpression expects a single index expression at a time (not a csv list of expressions)
+     * @return cluster alias in the index expression. If none is present, returns RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY
+     */
+    public static String parseClusterAlias(String indexExpression) {
+        assert indexExpression != null : "Must not pass null indexExpression";
+        String[] parts = splitIndexName(indexExpression.trim());
+        if (parts[0] == null) {
+            return RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY;
+        } else {
+            return parts[0];
+        }
+    }
+
     /**
      * Split the index name into remote cluster alias and index name.
      * The index expression is assumed to be individual index (no commas) but can contain `-`, wildcards,
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
index 620b80e91cb45..5e955539ee2ee 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.indices.IndicesExpressionGrouper;
 import org.elasticsearch.node.ReportingService;
 import org.elasticsearch.transport.RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult;
 
@@ -61,7 +62,11 @@
 /**
  * Basic service for accessing remote clusters via gateway nodes
  */
-public final class RemoteClusterService extends RemoteClusterAware implements Closeable, ReportingService<RemoteClusterServerInfo> {
+public final class RemoteClusterService extends RemoteClusterAware
+    implements
+        Closeable,
+        ReportingService<RemoteClusterServerInfo>,
+        IndicesExpressionGrouper {
 
     private static final Logger logger = LogManager.getLogger(RemoteClusterService.class);
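Taken together, a caller holding a `RemoteClusterService` can group a mixed index expression by cluster alias without resolving any concrete indices. A brief sketch (illustrative expressions; `IndicesOptions.lenientExpandOpen()` chosen arbitrarily):

[source,java]
----
// Illustrative only: grouping a mixed index expression by cluster alias.
import java.util.Map;

import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.indices.IndicesExpressionGrouper;
import org.elasticsearch.transport.RemoteClusterAware;

class GroupIndicesSketch {
    static void demo(IndicesExpressionGrouper grouper) { // e.g. the RemoteClusterService
        Map<String, OriginalIndices> grouped = grouper.groupIndices(IndicesOptions.lenientExpandOpen(), "logs1,logs2,cluster-a:logs*");
        // Local indices are grouped under RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.
        assert grouped.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
        // parseClusterAlias agrees with the grouping for single expressions.
        assert "cluster-a".equals(RemoteClusterAware.parseClusterAlias("cluster-a:logs*"));
        assert RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(RemoteClusterAware.parseClusterAlias("logs1"));
    }
}
----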
diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java
index 97b40dfeee52a..e45ac8a9e0f70 100644
--- a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java
+++ b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ListMatcher;
+import org.elasticsearch.test.MapMatcher;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.threadpool.Scheduler;
@@ -90,7 +91,8 @@ public void testSortByManyLongsSuccess() throws IOException {
                 values = values.item(List.of(0, b));
             }
         }
-        assertMap(map, matchesMap().entry("columns", columns).entry("values", values));
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(map, mapMatcher.entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0)));
     }
 
     /**
@@ -207,7 +209,8 @@ public void testGroupOnSomeLongs() throws IOException {
         Map<String, Object> map = responseAsMap(resp);
         ListMatcher columns = matchesList().item(matchesMap().entry("name", "MAX(a)").entry("type", "long"));
         ListMatcher values = matchesList().item(List.of(9));
-        assertMap(map, matchesMap().entry("columns", columns).entry("values", values));
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(map, mapMatcher.entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0)));
     }
 
     /**
@@ -219,7 +222,8 @@ public void testGroupOnManyLongs() throws IOException {
         Map<String, Object> map = responseAsMap(resp);
         ListMatcher columns = matchesList().item(matchesMap().entry("name", "MAX(a)").entry("type", "long"));
         ListMatcher values = matchesList().item(List.of(9));
-        assertMap(map, matchesMap().entry("columns", columns).entry("values", values));
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(map, mapMatcher.entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0)));
     }
 
     private Response groupOnManyLongs(int count) throws IOException {
@@ -249,7 +253,8 @@ public void testSmallConcat() throws IOException {
         ListMatcher columns = matchesList().item(matchesMap().entry("name", "a").entry("type", "long"))
             .item(matchesMap().entry("name", "str").entry("type", "keyword"));
         ListMatcher values = matchesList().item(List.of(1, "1".repeat(100)));
-        assertMap(map, matchesMap().entry("columns", columns).entry("values", values));
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(map, mapMatcher.entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0)));
     }
 
     public void testHugeConcat() throws IOException {
@@ -257,9 +262,10 @@ public void testHugeConcat() throws IOException {
         ResponseException e = expectThrows(ResponseException.class, () -> concat(10));
         Map<String, Object> map = responseAsMap(e.getResponse());
         logger.info("expected request rejected {}", map);
+        MapMatcher mapMatcher = matchesMap();
         assertMap(
             map,
-            matchesMap().entry("status", 400)
+            mapMatcher.entry("status", 400)
                 .entry("error", matchesMap().extraOk().entry("reason", "concatenating more than [1048576] bytes is not supported"))
         );
     }
@@ -287,7 +293,8 @@ public void testManyConcat() throws IOException {
         for (int s = 0; s < 300; s++) {
             columns = columns.item(matchesMap().entry("name", "str" + s).entry("type", "keyword"));
         }
-        assertMap(map, matchesMap().entry("columns", columns).entry("values", any(List.class)));
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(map, mapMatcher.entry("columns", columns).entry("values", any(List.class)).entry("took", greaterThanOrEqualTo(0)));
     }
 
     /**
@@ -344,7 +351,8 @@ public void testManyEval() throws IOException {
         for (int i = 0; i < 20; i++) {
             columns = columns.item(matchesMap().entry("name", "i0" + i).entry("type", "long"));
         }
-        assertMap(map, matchesMap().entry("columns", columns).entry("values", hasSize(10_000)));
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(map, mapMatcher.entry("columns", columns).entry("values", hasSize(10_000)).entry("took", greaterThanOrEqualTo(0)));
     }
 
     @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-serverless/issues/1874")
diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java
index 2b162b4f18ead..f8f1fe872711d 100644
--- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java
+++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java
@@ -149,24 +149,40 @@ public void testAllowedIndices() throws Exception {
         for (String user : List.of("test-admin", "user1")) {
             Response resp = runESQLCommand(user, "from index-user1 | stats sum=sum(value)");
             assertOK(resp);
-            MapMatcher matcher = responseMatcher().entry("columns", List.of(Map.of("name", "sum", "type", "double")))
+            Map<String, Object> responseMap = entityAsMap(resp);
+            MapMatcher mapMatcher = responseMatcher();
+            if (responseMap.get("took") != null) {
+                mapMatcher = mapMatcher.entry("took", ((Integer) responseMap.get("took")).intValue());
+            }
+            MapMatcher matcher = mapMatcher.entry("columns", List.of(Map.of("name", "sum", "type", "double")))
                 .entry("values", List.of(List.of(43.0d)));
-            assertMap(entityAsMap(resp), matcher);
+            assertMap(responseMap, matcher);
         }
 
         for (String user : List.of("test-admin", "user2")) {
             Response resp = runESQLCommand(user, "from index-user2 | stats sum=sum(value)");
             assertOK(resp);
-            MapMatcher matcher = responseMatcher().entry("columns", List.of(Map.of("name", "sum", "type", "double")))
+            Map<String, Object> responseMap = entityAsMap(resp);
+            MapMatcher mapMatcher = responseMatcher();
+            if (responseMap.get("took") != null) {
+                mapMatcher = mapMatcher.entry("took", ((Integer) responseMap.get("took")).intValue());
+            }
+            MapMatcher matcher = mapMatcher.entry("columns", List.of(Map.of("name", "sum", "type", "double")))
                 .entry("values", List.of(List.of(72.0d)));
-            assertMap(entityAsMap(resp), matcher);
+            assertMap(responseMap, matcher);
         }
+
         for (var index : List.of("index-user2", "index-user*", "index*")) {
             Response resp = runESQLCommand("metadata1_read2", "from " + index + " | stats sum=sum(value)");
             assertOK(resp);
-            MapMatcher matcher = responseMatcher().entry("columns", List.of(Map.of("name", "sum", "type", "double")))
+            Map<String, Object> responseMap = entityAsMap(resp);
+            MapMatcher mapMatcher = responseMatcher();
+            if (responseMap.get("took") != null) {
+                mapMatcher = mapMatcher.entry("took", ((Integer) responseMap.get("took")).intValue());
+            }
+            MapMatcher matcher = mapMatcher.entry("columns", List.of(Map.of("name", "sum", "type", "double")))
                 .entry("values", List.of(List.of(72.0d)));
-            assertMap(entityAsMap(resp), matcher);
+            assertMap(responseMap, matcher);
         }
     }
 
@@ -177,11 +193,11 @@ public void testAliases() throws Exception {
                 "from " + index + " METADATA _index" + "| stats sum=sum(value), index=VALUES(_index)"
             );
             assertOK(resp);
-            MapMatcher matcher = responseMatcher().entry(
-                "columns",
-                List.of(Map.of("name", "sum", "type", "double"), Map.of("name", "index", "type", "keyword"))
-            ).entry("values", List.of(List.of(72.0d, "index-user2")));
-            assertMap(entityAsMap(resp), matcher);
+            Map<String, Object> responseMap = entityAsMap(resp);
+            MapMatcher matcher = responseMatcher().entry("took", ((Integer) responseMap.get("took")).intValue())
+                .entry("columns", List.of(Map.of("name", "sum", "type", "double"), Map.of("name", "index", "type", "keyword")))
+                .entry("values", List.of(List.of(72.0d, "index-user2")));
+            assertMap(responseMap, matcher);
         }
     }
 
@@ -189,15 +205,18 @@ public void testAliasFilter() throws Exception {
         for (var index : List.of("first-alias", "first-alias,index-*", "first-*,index-*")) {
             Response resp = runESQLCommand("alias_user1", "from " + index + " METADATA _index" + "| KEEP _index, org, value | LIMIT 10");
             assertOK(resp);
-            MapMatcher matcher = responseMatcher().entry(
-                "columns",
-                List.of(
-                    Map.of("name", "_index", "type", "keyword"),
-                    Map.of("name", "org", "type", "keyword"),
-                    Map.of("name", "value", "type", "double")
-                )
-            ).entry("values", List.of(List.of("index-user1", "sales", 31.0d)));
-            assertMap(entityAsMap(resp), matcher);
+            Map<String, Object> responseMap = entityAsMap(resp);
+            MapMatcher matcher = responseMatcher().entry("took", ((Integer) responseMap.get("took")).intValue())
+                .entry(
+                    "columns",
+                    List.of(
+                        Map.of("name", "_index", "type", "keyword"),
+                        Map.of("name", "org", "type", "keyword"),
+                        Map.of("name", "value", "type", "double")
+                    )
+                )
+                .entry("values", List.of(List.of("index-user1", "sales", 31.0d)));
+            assertMap(responseMap, matcher);
         }
     }
diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java
index d5c8926d93b84..454f3962c07ea 100644
--- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java
+++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.MapMatcher;
 import org.elasticsearch.test.TestClustersThreadFilter;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -29,12 +30,16 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
 
 import static org.elasticsearch.test.MapMatcher.assertMap;
 import static org.elasticsearch.test.MapMatcher.matchesMap;
+import static org.hamcrest.Matchers.any;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 @ThreadLeakFilters(filters = TestClustersThreadFilter.class)
 public class MultiClustersIT extends ESRestTestCase {
@@ -145,13 +150,31 @@ public void testCount() throws Exception {
             Map<String, Object> result = run("FROM test-local-index,*:test-remote-index | STATS c = COUNT(*)");
             var columns = List.of(Map.of("name", "c", "type", "long"));
             var values = List.of(List.of(localDocs.size() + remoteDocs.size()));
-            assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+
+            MapMatcher mapMatcher = matchesMap();
+            assertMap(
+                result,
+                mapMatcher.entry("columns", columns)
+                    .entry("values", values)
+                    .entry("took", greaterThanOrEqualTo(0))
+                    .entry("_clusters", any(Map.class))
+            );
+            assertClusterDetailsMap(result, false);
         }
         {
             Map<String, Object> result = run("FROM *:test-remote-index | STATS c = COUNT(*)");
             var columns = List.of(Map.of("name", "c", "type", "long"));
             var values = List.of(List.of(remoteDocs.size()));
-            assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+
+            MapMatcher mapMatcher = matchesMap();
+            assertMap(
+                result,
+                mapMatcher.entry("columns", columns)
+                    .entry("values", values)
+                    .entry("took", greaterThanOrEqualTo(0))
+                    .entry("_clusters", any(Map.class))
+            );
+            assertClusterDetailsMap(result, true);
         }
     }
@@ -161,14 +184,86 @@ public void testUngroupedAggs() throws Exception {
             var columns = List.of(Map.of("name", "total", "type", "long"));
             long sum = Stream.concat(localDocs.stream(), remoteDocs.stream()).mapToLong(d -> d.data).sum();
             var values = List.of(List.of(Math.toIntExact(sum)));
-            assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+
+            // check all sections of map except _clusters/details
+            MapMatcher mapMatcher = matchesMap();
+            assertMap(
+                result,
+                mapMatcher.entry("columns", columns)
+                    .entry("values", values)
+                    .entry("took", greaterThanOrEqualTo(0))
+                    .entry("_clusters", any(Map.class))
+            );
+            assertClusterDetailsMap(result, false);
         }
         {
             Map<String, Object> result = run("FROM *:test-remote-index | STATS total = SUM(data)");
             var columns = List.of(Map.of("name", "total", "type", "long"));
             long sum = remoteDocs.stream().mapToLong(d -> d.data).sum();
             var values = List.of(List.of(Math.toIntExact(sum)));
-            assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+
+            // check all sections of map except _clusters/details
+            MapMatcher mapMatcher = matchesMap();
+            assertMap(
+                result,
+                mapMatcher.entry("columns", columns)
+                    .entry("values", values)
+                    .entry("took", greaterThanOrEqualTo(0))
+                    .entry("_clusters", any(Map.class))
+            );
+            assertClusterDetailsMap(result, true);
+        }
+    }
+
+    private void assertClusterDetailsMap(Map<String, Object> result, boolean remoteOnly) {
+        @SuppressWarnings("unchecked")
+        Map<String, Object> clusters = (Map<String, Object>) result.get("_clusters");
+        assertThat(clusters.size(), equalTo(7));
+        assertThat(clusters.keySet(), equalTo(Set.of("total", "successful", "running", "skipped", "partial", "failed", "details")));
+        int expectedNumClusters = remoteOnly ? 1 : 2;
+        Set<String> expectedClusterAliases = remoteOnly ? Set.of("remote_cluster") : Set.of("remote_cluster", "(local)");
+
+        assertThat(clusters.get("total"), equalTo(expectedNumClusters));
+        assertThat(clusters.get("successful"), equalTo(expectedNumClusters));
+        assertThat(clusters.get("running"), equalTo(0));
+        assertThat(clusters.get("skipped"), equalTo(0));
+        assertThat(clusters.get("partial"), equalTo(0));
+        assertThat(clusters.get("failed"), equalTo(0));
+
+        @SuppressWarnings("unchecked")
+        Map<String, Object> details = (Map<String, Object>) clusters.get("details");
+        assertThat(details.keySet(), equalTo(expectedClusterAliases));
+
+        @SuppressWarnings("unchecked")
+        Map<String, Object> remoteCluster = (Map<String, Object>) details.get("remote_cluster");
+        assertThat(remoteCluster.keySet(), equalTo(Set.of("status", "indices", "took", "_shards")));
+        assertThat(remoteCluster.get("status"), equalTo("successful"));
+        assertThat(remoteCluster.get("indices"), equalTo("test-remote-index"));
+        assertThat((Integer) remoteCluster.get("took"), greaterThanOrEqualTo(0));
+
+        @SuppressWarnings("unchecked")
+        Map<String, Object> remoteClusterShards = (Map<String, Object>) remoteCluster.get("_shards");
+        assertThat(remoteClusterShards.keySet(), equalTo(Set.of("total", "successful", "skipped", "failed")));
+        assertThat((Integer) remoteClusterShards.get("total"), greaterThanOrEqualTo(0));
+        assertThat((Integer) remoteClusterShards.get("successful"), equalTo((Integer) remoteClusterShards.get("total")));
+        assertThat((Integer) remoteClusterShards.get("skipped"), equalTo(0));
+        assertThat((Integer) remoteClusterShards.get("failed"), equalTo(0));
+
+        if (remoteOnly == false) {
+            @SuppressWarnings("unchecked")
+            Map<String, Object> localCluster = (Map<String, Object>) details.get("(local)");
+            assertThat(localCluster.keySet(), equalTo(Set.of("status", "indices", "took", "_shards")));
+            assertThat(localCluster.get("status"), equalTo("successful"));
+            assertThat(localCluster.get("indices"), equalTo("test-local-index"));
+            assertThat((Integer) localCluster.get("took"), greaterThanOrEqualTo(0));
+
+            @SuppressWarnings("unchecked")
+            Map<String, Object> localClusterShards = (Map<String, Object>) localCluster.get("_shards");
+            assertThat(localClusterShards.keySet(), equalTo(Set.of("total", "successful", "skipped", "failed")));
+            assertThat((Integer) localClusterShards.get("total"), greaterThanOrEqualTo(0));
+            assertThat((Integer) localClusterShards.get("successful"), equalTo((Integer) localClusterShards.get("total")));
+            assertThat((Integer) localClusterShards.get("skipped"), equalTo(0));
+            assertThat((Integer) localClusterShards.get("failed"), equalTo(0));
         }
     }
 
@@ -183,7 +278,16 @@ public void testGroupedAggs() throws Exception {
             .sorted(Map.Entry.comparingByKey())
             .map(e -> List.of(Math.toIntExact(e.getValue()), e.getKey()))
             .toList();
-        assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(
+            result,
+            mapMatcher.entry("columns", columns)
+                .entry("values", values)
+                .entry("took", greaterThanOrEqualTo(0))
+                .entry("_clusters", any(Map.class))
+        );
+        assertClusterDetailsMap(result, false);
     }
     {
         Map<String, Object> result = run("FROM *:test-remote-index | STATS total = SUM(data) by color | SORT color");
@@ -195,7 +299,17 @@ public void testGroupedAggs() throws Exception {
             .sorted(Map.Entry.comparingByKey())
             .map(e -> List.of(Math.toIntExact(e.getValue()), e.getKey()))
             .toList();
-        assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+
+        // check all sections of map except _clusters/details
+        MapMatcher mapMatcher = matchesMap();
+        assertMap(
+            result,
+            mapMatcher.entry("columns", columns)
+                .entry("values", values)
+                .entry("took", greaterThanOrEqualTo(0))
+                .entry("_clusters", any(Map.class))
+        );
+        assertClusterDetailsMap(result, true);
     }
 }
diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java
index cf5b3453fa97c..c5ab20469bf77 100644
--- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java
+++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java
@@ -44,6 +44,7 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.startsWith;
@@ -78,10 +79,12 @@ public void testBasicEsql() throws IOException {
             builder.pragmas(Settings.builder().put("data_partitioning", "shard").build());
         }
         Map<String, Object> result = runEsql(builder);
-        assertEquals(2, result.size());
+        assertEquals(3, result.size());
         Map<String, String> colA = Map.of("name", "avg(value)", "type", "double");
         assertEquals(List.of(colA), result.get("columns"));
         assertEquals(List.of(List.of(499.5d)), result.get("values"));
+        assertTrue(result.containsKey("took"));
+        assertThat(((Number) result.get("took")).longValue(), greaterThanOrEqualTo(0L));
     }
 
     public void testInvalidPragma() throws IOException {
@@ -283,11 +286,13 @@ public void testProfile() throws IOException {
             builder.pragmas(Settings.builder().put("data_partitioning", "shard").build());
         }
         Map<String, Object> result = runEsql(builder);
+        MapMatcher mapMatcher = matchesMap();
         assertMap(
             result,
-            matchesMap().entry("columns", matchesList().item(matchesMap().entry("name", "AVG(value)").entry("type", "double")))
+            mapMatcher.entry("columns", matchesList().item(matchesMap().entry("name", "AVG(value)").entry("type", "double")))
                 .entry("values", List.of(List.of(499.5d)))
                 .entry("profile", matchesMap().entry("drivers", instanceOf(List.class)))
+                .entry("took", greaterThanOrEqualTo(0))
         );
 
         List<List<String>> signatures = new ArrayList<>();
@@ -362,21 +367,26 @@ public void testInlineStatsProfile() throws IOException {
             // Lock to shard level partitioning, so we get consistent profile output
             builder.pragmas(Settings.builder().put("data_partitioning", "shard").build());
         }
+
         Map<String, Object> result = runEsql(builder);
+        MapMatcher mapMatcher = matchesMap();
         ListMatcher values = matchesList();
         for (int i = 0; i < 1000; i++) {
             values = values.item(matchesList().item("2020-12-12T00:00:00.000Z").item("value" + i).item("value" + i).item(i).item(499.5));
         }
         assertMap(
             result,
-            matchesMap().entry(
+            mapMatcher.entry(
                 "columns",
                 matchesList().item(matchesMap().entry("name", "@timestamp").entry("type", "date"))
                     .item(matchesMap().entry("name", "test").entry("type", "text"))
                     .item(matchesMap().entry("name", "test.keyword").entry("type", "keyword"))
                     .item(matchesMap().entry("name", "value").entry("type", "long"))
                     .item(matchesMap().entry("name", "AVG(value)").entry("type", "double"))
-            ).entry("values", values).entry("profile", matchesMap().entry("drivers", instanceOf(List.class)))
+            )
+                .entry("values", values)
+                .entry("profile", matchesMap().entry("drivers", instanceOf(List.class)))
+                .entry("took", greaterThanOrEqualTo(0))
         );
 
         List<List<String>> signatures = new ArrayList<>();
@@ -472,16 +482,20 @@ public void testForceSleepsProfile() throws IOException {
         for (int group2 = 0; group2 < 10; group2++) {
             expectedValues.add(List.of(1.0, 1, 1, 0, group2));
         }
+        MapMatcher mapMatcher = matchesMap();
         assertMap(
             result,
-            matchesMap().entry(
+            mapMatcher.entry(
                 "columns",
                 matchesList().item(matchesMap().entry("name", "AVG(value)").entry("type", "double"))
                     .item(matchesMap().entry("name", "MAX(value)").entry("type", "long"))
                     .item(matchesMap().entry("name", "MIN(value)").entry("type", "long"))
                     .item(matchesMap().entry("name", "group1").entry("type", "long"))
                     .item(matchesMap().entry("name", "group2").entry("type", "long"))
-            ).entry("values", expectedValues).entry("profile", matchesMap().entry("drivers", instanceOf(List.class)))
+            )
+                .entry("values", expectedValues)
+                .entry("profile", matchesMap().entry("drivers", instanceOf(List.class)))
+                .entry("took", greaterThanOrEqualTo(0))
         );
 
         @SuppressWarnings("unchecked")
diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java
index f5de02814c654..d124fdb5755c3 100644
--- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java
+++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.logging.Logger;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.ListMatcher;
+import org.elasticsearch.test.MapMatcher;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentType;
@@ -48,6 +49,7 @@
 import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.runEsqlSync;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 /**
  * Creates indices with many different mappings and fetches values from them to make sure
@@ -302,7 +304,7 @@ public void testFlattenedUnsupported() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("flattened", "unsupported")))
+            matchesMapWithOptionalTook(result.get("took")).entry("columns", List.of(columnInfo("flattened", "unsupported")))
                 .entry("values", List.of(matchesList().item(null)))
         );
     }
@@ -344,8 +346,10 @@ public void testTextFieldWithKeywordSubfield() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.raw", "keyword")))
-                .entry("values", List.of(matchesList().item(value).item(value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("text_field", "text"), columnInfo("text_field.raw", "keyword"))
+            ).entry("values", List.of(matchesList().item(value).item(value)))
         );
     }
@@ -368,8 +372,10 @@ public void testTextFieldWithIntegerSubfield() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.int", "integer")))
-                .entry("values", List.of(matchesList().item(Integer.toString(value)).item(value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("text_field", "text"), columnInfo("text_field.int", "integer"))
+            ).entry("values", List.of(matchesList().item(Integer.toString(value)).item(value)))
        );
     }
@@ -392,8 +398,10 @@ public void testTextFieldWithIntegerSubfieldMalformed() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.int", "integer")))
-                .entry("values", List.of(matchesList().item(value).item(null)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("text_field", "text"), columnInfo("text_field.int", "integer"))
+            ).entry("values", List.of(matchesList().item(value).item(null)))
         );
     }
@@ -416,8 +424,10 @@ public void testTextFieldWithIpSubfield() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.ip", "ip")))
-                .entry("values", List.of(matchesList().item(value).item(value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("text_field", "text"), columnInfo("text_field.ip", "ip"))
+            ).entry("values", List.of(matchesList().item(value).item(value)))
         );
     }
@@ -440,8 +450,10 @@ public void testTextFieldWithIpSubfieldMalformed() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("text_field", "text"), columnInfo("text_field.ip", "ip")))
-                .entry("values", List.of(matchesList().item(value).item(null)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("text_field", "text"), columnInfo("text_field.ip", "ip"))
+            ).entry("values", List.of(matchesList().item(value).item(null)))
         );
     }
@@ -465,7 +477,7 @@ public void testIntFieldWithTextOrKeywordSubfield() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry(
+            matchesMapWithOptionalTook(result.get("took")).entry(
                 "columns",
                 List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.str", text ? "text" : "keyword"))
             ).entry("values", List.of(matchesList().item(value).item(Integer.toString(value))))
         );
     }
@@ -492,7 +504,7 @@ public void testIntFieldWithTextOrKeywordSubfieldMalformed() throws IOException
 
         assertMap(
             result,
-            matchesMap().entry(
+            matchesMapWithOptionalTook(result.get("took")).entry(
                 "columns",
                 List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.str", text ? "text" : "keyword"))
             ).entry("values", List.of(matchesList().item(null).item(value)))
         );
     }
@@ -519,8 +531,10 @@ public void testIpFieldWithTextOrKeywordSubfield() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("ip_field", "ip"), columnInfo("ip_field.str", text ? "text" : "keyword")))
-                .entry("values", List.of(matchesList().item(value).item(value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("ip_field", "ip"), columnInfo("ip_field.str", text ? "text" : "keyword"))
+            ).entry("values", List.of(matchesList().item(value).item(value)))
         );
     }
@@ -544,8 +558,10 @@ public void testIpFieldWithTextOrKeywordSubfieldMalformed() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("ip_field", "ip"), columnInfo("ip_field.str", text ? "text" : "keyword")))
-                .entry("values", List.of(matchesList().item(null).item(value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("ip_field", "ip"), columnInfo("ip_field.str", text ? "text" : "keyword"))
+            ).entry("values", List.of(matchesList().item(null).item(value)))
         );
     }
@@ -569,8 +585,10 @@ public void testIntFieldWithByteSubfield() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.byte", "integer")))
-                .entry("values", List.of(matchesList().item((int) value).item((int) value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.byte", "integer"))
+            ).entry("values", List.of(matchesList().item((int) value).item((int) value)))
         );
     }
@@ -596,8 +614,10 @@ public void testIntFieldWithByteSubfieldTooBig() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.byte", "integer")))
-                .entry("values", List.of(matchesList().item(value).item(null)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("integer_field", "integer"), columnInfo("integer_field.byte", "integer"))
+            ).entry("values", List.of(matchesList().item(value).item(null)))
         );
     }
@@ -621,11 +641,21 @@ public void testByteFieldWithIntSubfield() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("byte_field", "integer"), columnInfo("byte_field.int", "integer")))
-                .entry("values", List.of(matchesList().item((int) value).item((int) value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("byte_field", "integer"), columnInfo("byte_field.int", "integer"))
+            ).entry("values", List.of(matchesList().item((int) value).item((int) value)))
         );
     }
 
+    static MapMatcher matchesMapWithOptionalTook(Object tookTimeValue) {
+        MapMatcher mapMatcher = matchesMap();
+        if (tookTimeValue instanceof Number) {
+            mapMatcher = mapMatcher.entry("took", greaterThanOrEqualTo(0));
+        }
+        return mapMatcher;
+    }
+
     /**
      *
      * "byte_field": {
@@ -646,8 +676,10 @@ public void testByteFieldWithIntSubfieldTooBig() throws IOException {
 
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("byte_field", "integer"), columnInfo("byte_field.int", "integer")))
-                .entry("values", List.of(matchesList().item(null).item(value)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("byte_field", "integer"), columnInfo("byte_field.int", "integer"))
+            ).entry("values", List.of(matchesList().item(null).item(value)))
         );
     }
 
@@ -676,7 +708,7 @@ public void testIncompatibleTypes() throws IOException {
         Map<String, Object> result = runEsql("FROM test*");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("f", "unsupported")))
+            matchesMapWithOptionalTook(result.get("took")).entry("columns", List.of(columnInfo("f", "unsupported")))
                 .entry("values", List.of(matchesList().item(null), matchesList().item(null)))
         );
         ResponseException e = expectThrows(ResponseException.class, () -> runEsql("FROM test* | SORT f | LIMIT 3"));
@@ -714,8 +746,10 @@ public void testDistinctInEachIndex() throws IOException {
         Map<String, Object> result = runEsql("FROM test* | SORT file, other");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("file", "keyword"), columnInfo("other", "keyword")))
-                .entry("values", List.of(matchesList().item("f1").item(null), matchesList().item(null).item("o2")))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("file", "keyword"), columnInfo("other", "keyword"))
+            ).entry("values", List.of(matchesList().item("f1").item(null), matchesList().item(null).item("o2")))
         );
     }
 
@@ -778,8 +812,10 @@ public void testMergeKeywordAndObject() throws IOException {
         Map result = runEsql("FROM test* | SORT file.raw | LIMIT 2");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("file", "unsupported"), columnInfo("file.raw", "keyword")))
-                .entry("values", List.of(matchesList().item(null).item("o2"), matchesList().item(null).item(null)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("file", "unsupported"), columnInfo("file.raw", "keyword"))
+            ).entry("values", List.of(matchesList().item(null).item("o2"), matchesList().item(null).item(null)))
         );
     }
 
@@ -823,8 +859,10 @@ public void testPropagateUnsupportedToSubFields() throws IOException {
         Map result = runEsql("FROM test* | LIMIT 2");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("f", "unsupported"), columnInfo("f.raw", "unsupported")))
-                .entry("values", List.of(matchesList().item(null).item(null)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("f", "unsupported"), columnInfo("f.raw", "unsupported"))
+            ).entry("values", List.of(matchesList().item(null).item(null)))
         );
     }
 
@@ -886,8 +924,10 @@ public void testMergeUnsupportedAndObject() throws IOException {
         Map result = runEsql("FROM test* | LIMIT 2");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("f", "unsupported"), columnInfo("f.raw", "unsupported")))
-                .entry("values", List.of(matchesList().item(null).item(null), matchesList().item(null).item(null)))
+            matchesMapWithOptionalTook(result.get("took")).entry(
+                "columns",
+                List.of(columnInfo("f", "unsupported"), columnInfo("f.raw", "unsupported"))
+            ).entry("values", List.of(matchesList().item(null).item(null), matchesList().item(null).item(null)))
         );
     }
 
@@ -921,7 +961,7 @@ public void testIntegerDocValuesConflict() throws IOException {
         Map result = runEsql("FROM test* | SORT emp_no | LIMIT 2");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("emp_no", "integer")))
+            matchesMapWithOptionalTook(result.get("took")).entry("columns", List.of(columnInfo("emp_no", "integer")))
                 .entry("values", List.of(matchesList().item(1), matchesList().item(2)))
         );
     }
@@ -967,7 +1007,7 @@ public void testLongIntegerConflict() throws IOException {
         Map result = runEsql("FROM test* | LIMIT 2");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("emp_no", "unsupported")))
+            matchesMapWithOptionalTook(result.get("took")).entry("columns", List.of(columnInfo("emp_no", "unsupported")))
                 .entry("values", List.of(matchesList().item(null), matchesList().item(null)))
         );
     }
@@ -1013,7 +1053,7 @@ public void testIntegerShortConflict() throws IOException {
         Map result = runEsql("FROM test* | LIMIT 2");
         assertMap(
             result,
-            matchesMap().entry("columns", List.of(columnInfo("emp_no", "unsupported")))
+            matchesMapWithOptionalTook(result.get("took")).entry("columns", List.of(columnInfo("emp_no", "unsupported")))
                 .entry("values", List.of(matchesList().item(null), matchesList().item(null)))
         );
     }
@@ -1312,7 +1352,7 @@ void test(Object value, Object expectedValue) throws IOException {
                 values = values.item(expectedValue);
             }
 
-            assertMap(result, matchesMap().entry("columns", columns).entry("values", List.of(values)));
+            assertMap(result, matchesMapWithOptionalTook(result.get("took")).entry("columns", columns).entry("values", List.of(values)));
         }
 
         void createIndex(String name, String fieldName) throws IOException {
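
A note for reviewers on the new `matchesMapWithOptionalTook` helper above: it only pins down `took` when the response actually carries the field, so the same assertion works against servers that do and do not return it. A minimal usage sketch, assuming the `runEsql`, `columnInfo`, `assertMap`, and `matchesList` helpers these tests already import (the column name and value are hypothetical):

[source,java]
----
// The matcher adds entry("took", greaterThanOrEqualTo(0)) only when the
// response contained a numeric "took", so the assertion is version-tolerant.
Map<String, Object> result = runEsql("FROM test* | LIMIT 1");
assertMap(
    result,
    matchesMapWithOptionalTook(result.get("took"))
        .entry("columns", List.of(columnInfo("f", "keyword")))      // hypothetical column
        .entry("values", List.of(matchesList().item("some-value"))) // hypothetical row
);
----
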
diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java
index 759541a9ab5d1..def6491fb920f 100644
--- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java
+++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java
@@ -24,6 +24,7 @@
 import static org.elasticsearch.test.MapMatcher.assertMap;
 import static org.elasticsearch.test.MapMatcher.matchesMap;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 public abstract class RestEnrichTestCase extends ESRestTestCase {
 
@@ -161,16 +162,14 @@ public void testMatchField_ImplicitFieldsList() throws IOException {
         Map result = runEsql("from test | enrich countries | keep number | sort number");
         var columns = List.of(Map.of("name", "number", "type", "long"));
         var values = List.of(List.of(1000), List.of(1000), List.of(5000));
-
-        assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+        assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0)));
     }
 
     public void testMatchField_ImplicitFieldsList_WithStats() throws IOException {
         Map result = runEsql("from test | enrich countries | stats s = sum(number) by country_name");
         var columns = List.of(Map.of("name", "s", "type", "long"), Map.of("name", "country_name", "type", "keyword"));
         var values = List.of(List.of(2000, "United States of America"), List.of(5000, "China"));
-
-        assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+        assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0)));
     }
 
     private Map<String, Object> runEsql(String query) throws IOException {
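
The enrich tests always expect `took` in the response, so they assert it inline with `greaterThanOrEqualTo(0)` rather than pinning a concrete latency, which would be flaky. If this inline pattern keeps repeating, a small helper along these lines could fold it into one place; this is a hypothetical convenience, not part of the change:

[source,java]
----
// Hypothetical helper (not in this PR): a MapMatcher that always requires a
// non-negative top-level "took", for endpoints known to return the field.
static MapMatcher matchesMapWithTook() {
    return matchesMap().entry("took", greaterThanOrEqualTo(0));
}
----
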
diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java
index d9d11c3568ab7..39340ab745a4d 100644
--- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java
+++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java
@@ -290,7 +290,9 @@ public void testNullInAggs() throws IOException {
         Map<String, Object> result = runEsql(builder);
         assertMap(
             result,
-            matchesMap().entry("values", List.of(List.of(1))).entry("columns", List.of(Map.of("name", "min(value)", "type", "long")))
+            matchesMap().entry("values", List.of(List.of(1)))
+                .entry("columns", List.of(Map.of("name", "min(value)", "type", "long")))
+                .entry("took", greaterThanOrEqualTo(0))
         );
 
         builder = requestObjectBuilder().query(fromIndex() + " | stats min(value) by group | sort group, `min(value)`");
@@ -299,6 +301,7 @@ public void testNullInAggs() throws IOException {
             result,
             matchesMap().entry("values", List.of(List.of(2, 0), List.of(1, 1)))
                 .entry("columns", List.of(Map.of("name", "min(value)", "type", "long"), Map.of("name", "group", "type", "long")))
+                .entry("took", greaterThanOrEqualTo(0))
         );
     }
 
@@ -556,7 +559,7 @@ public void testMetadataFieldsOnMultipleIndices() throws IOException {
         );
         var values = List.of(List.of(3, testIndexName() + "-2", 1, "id-2"), List.of(2, testIndexName() + "-1", 2, "id-1"));
 
-        assertMap(result, matchesMap().entry("columns", columns).entry("values", values));
+        assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0)));
 
         assertThat(deleteIndex(testIndexName() + "-1").isAcknowledged(), is(true)); // clean up
         assertThat(deleteIndex(testIndexName() + "-2").isAcknowledged(), is(true)); // clean up
@@ -746,7 +749,7 @@ public void testInlineStatsNow() throws IOException {
                     .item(matchesMap().entry("name", "value").entry("type", "long"))
                     .item(matchesMap().entry("name", "now").entry("type", "date"))
                     .item(matchesMap().entry("name", "AVG(value)").entry("type", "double"))
-            ).entry("values", values)
+            ).entry("values", values).entry("took", greaterThanOrEqualTo(0))
         );
     }
 
@@ -760,10 +763,13 @@ public void testTopLevelFilter() throws IOException {
             }
             b.endObject();
         }).query(fromIndex() + " | STATS SUM(value)");
+
+        Map<String, Object> result = runEsql(builder);
         assertMap(
-            runEsql(builder),
+            result,
             matchesMap().entry("columns", matchesList().item(matchesMap().entry("name", "SUM(value)").entry("type", "long")))
                 .entry("values", List.of(List.of(499500)))
+                .entry("took", greaterThanOrEqualTo(0))
         );
     }
 
@@ -777,10 +783,12 @@ public void testTopLevelFilterMerged() throws IOException {
             }
             b.endObject();
         }).query(fromIndex() + " | WHERE value == 12 | STATS SUM(value)");
+        Map<String, Object> result = runEsql(builder);
         assertMap(
-            runEsql(builder),
+            result,
             matchesMap().entry("columns", matchesList().item(matchesMap().entry("name", "SUM(value)").entry("type", "long")))
                 .entry("values", List.of(List.of(12)))
+                .entry("took", greaterThanOrEqualTo(0))
         );
     }
 
@@ -809,10 +817,12 @@ public void testTopLevelFilterBoolMerged() throws IOException {
                 }
                 b.endObject();
             }).query(fromIndex() + " | WHERE @timestamp > \"2010-01-01\" | STATS SUM(value)");
+            Map<String, Object> result = runEsql(builder);
             assertMap(
-                runEsql(builder),
+                result,
                 matchesMap().entry("columns", matchesList().item(matchesMap().entry("name", "SUM(value)").entry("type", "long")))
                     .entry("values", List.of(List.of(12)))
+                    .entry("took", greaterThanOrEqualTo(0))
             );
         }
     }
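
These hunks also switch from asserting on `runEsql(builder)` directly to first capturing the response in a local `result`, so the same map can feed both the matcher and the `took` lookup. For a caller consuming the raw map form of the response, a guarded read keeps compatibility with servers that predate the field; a sketch, with `result` standing in for any parsed response map:

[source,java]
----
// "took" is a top-level field in milliseconds; older servers may omit it.
Object took = result.get("took");
if (took instanceof Number tookMillis) {
    System.out.println("ES|QL query took " + tookMillis.longValue() + " ms");
}
----
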
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/ConfigurationTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/ConfigurationTestUtils.java
index fce88be2a3750..39e79b33327a9 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/ConfigurationTestUtils.java
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/ConfigurationTestUtils.java
@@ -70,7 +70,8 @@ public static Configuration randomConfiguration(String query, Map<String, Map<String, Column>> tables) {
+        List<EsqlExecutionInfo.Cluster> clusters = executionInfo.clusterAliases()
+            .stream()
+            .map(alias -> executionInfo.getCluster(alias))
+            .collect(Collectors.toList());
+
+        for (EsqlExecutionInfo.Cluster cluster : clusters) {
+            assertThat(cluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(cluster.getIndexExpression(), equalTo("events"));
+            assertThat(cluster.getTotalShards(), equalTo(1));
+            assertThat(cluster.getSuccessfulShards(), equalTo(1));
+            assertThat(cluster.getSkippedShards(), equalTo(0));
+            assertThat(cluster.getFailedShards(), equalTo(0));
+        }
+    }
+
     public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin {
 
         public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception {
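
The assertion helper above walks every cluster recorded in the `EsqlExecutionInfo` and applies the same checks to each. Outside a test, the equivalent walk over a completed response looks like this sketch (`resp` is assumed to be a finished `EsqlQueryResponse`; `getTook()` can be null while a cluster is still running):

[source,java]
----
// Iterate per-cluster execution metadata from a completed ES|QL response.
EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
for (String alias : executionInfo.clusterAliases()) {
    EsqlExecutionInfo.Cluster cluster = executionInfo.getCluster(alias);
    // each cluster reports its own status, latency, and shard accounting
    System.out.println(alias + ": " + cluster.getStatus()
        + ", " + cluster.getTook().millis() + " ms"
        + ", " + cluster.getSuccessfulShards() + "/" + cluster.getTotalShards() + " shards");
}
----
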
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java
index 35c37eea10362..03757d44a9f58 100644
--- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java
@@ -27,12 +27,13 @@
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
 import org.elasticsearch.xpack.esql.plugin.QueryPragmas;
-import org.junit.Before;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
@@ -41,6 +42,7 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
 
 public class CrossClustersQueryIT extends AbstractMultiClustersTestCase {
     private static final String REMOTE_CLUSTER = "cluster-a";
@@ -50,6 +52,11 @@ protected Collection<String> remoteClusterAlias() {
         return List.of(REMOTE_CLUSTER);
     }
 
+    @Override
+    protected Map<String, Boolean> skipUnavailableForRemoteClusters() {
+        return Map.of(REMOTE_CLUSTER, randomBoolean());
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
         List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias));
@@ -71,57 +78,299 @@ public List<Setting<?>> getSettings() {
         }
     }
 
-    @Before
-    public void populateLocalIndices() {
-        Client localClient = client(LOCAL_CLUSTER);
-        assertAcked(
-            localClient.admin()
-                .indices()
-                .prepareCreate("logs-1")
-                .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5)))
-                .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long")
-        );
-        for (int i = 0; i < 10; i++) {
-            localClient.prepareIndex("logs-1").setSource("id", "local-" + i, "tag", "local", "v", i).get();
-        }
-        localClient.admin().indices().prepareRefresh("logs-1").get();
-    }
-
-    @Before
-    public void populateRemoteIndices() {
-        Client remoteClient = client(REMOTE_CLUSTER);
-        assertAcked(
-            remoteClient.admin()
-                .indices()
-                .prepareCreate("logs-2")
-                .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5)))
-                .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long")
-        );
-        for (int i = 0; i < 10; i++) {
-            remoteClient.prepareIndex("logs-2").setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get();
-        }
-        remoteClient.admin().indices().prepareRefresh("logs-2").get();
-    }
-
     public void testSimple() {
+        Map<String, Object> testClusterInfo = setupTwoClusters();
+        int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
+        int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards");
+
         try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats sum (v)")) {
             List<List<Object>> values = getValuesList(resp);
             assertThat(values, hasSize(1));
             assertThat(values.get(0), equalTo(List.of(330L)));
+
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER)));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("logs-*"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
         }
+
         try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats count(*) by tag | sort tag | keep tag")) {
             List<List<Object>> values = getValuesList(resp);
             assertThat(values, hasSize(2));
             assertThat(values.get(0), equalTo(List.of("local")));
             assertThat(values.get(1), equalTo(List.of("remote")));
+
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER)));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("logs-*"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
+        }
+    }
+
+    public void testSearchesWhereMissingIndicesAreSpecified() {
+        Map<String, Object> testClusterInfo = setupTwoClusters();
+        int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
+        int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards");
+
+        // since a valid local index was specified, the invalid index on cluster-a does not throw an exception,
+        // but instead is simply ignored - ensure this is captured in the EsqlExecutionInfo
+        try (EsqlQueryResponse resp = runQuery("from logs-*,cluster-a:no_such_index | stats sum (v)")) {
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            List<List<Object>> values = getValuesList(resp);
+            assertThat(values, hasSize(1));
+            assertThat(values.get(0), equalTo(List.of(45L)));
+
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER)));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(0));  // 0 since no matching index, thus no shards to search
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(0));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("logs-*"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
+        }
+
+        // since the remote cluster has a valid index expression, the missing local index is ignored;
+        // make sure this is captured in the EsqlExecutionInfo
+        try (EsqlQueryResponse resp = runQuery("from no_such_index,*:logs-* | stats count(*) by tag | sort tag | keep tag")) {
+            List<List<Object>> values = getValuesList(resp);
+            assertThat(values, hasSize(1));
+            assertThat(values.get(0), equalTo(List.of("remote")));
+
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER)));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("no_such_index"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(0));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(0));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
+        }
+
+        // when multiple invalid indices are specified on the remote cluster, both should be ignored and present
+        // in the index expression of the EsqlExecutionInfo, with an indication that zero shards were searched
+        try (
+            EsqlQueryResponse resp = runQuery(
+                "FROM no_such_index*,*:no_such_index1,*:no_such_index2,logs-1 | STATS COUNT(*) by tag | SORT tag | KEEP tag"
+            )
+        ) {
+            List<List<Object>> values = getValuesList(resp);
+            assertThat(values, hasSize(1));
+            assertThat(values.get(0), equalTo(List.of("local")));
+
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER)));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index1,no_such_index2"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(0));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(0));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("no_such_index*,logs-1"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
+        }
+
+        // wildcard on remote cluster that matches nothing - should be present in EsqlExecutionInfo marked as SKIPPED, no shards searched
+        try (EsqlQueryResponse resp = runQuery("from cluster-a:no_such_index*,logs-* | stats sum (v)")) {
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            List<List<Object>> values = getValuesList(resp);
+            assertThat(values, hasSize(1));
+            assertThat(values.get(0), equalTo(List.of(45L)));
+
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER)));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index*"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(0));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(0));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("logs-*"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
+        }
+    }
+
+    public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() {
+        Map<String, Object> testClusterInfo = setupTwoClusters();
+        int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
+
+        // a query which matches no remote cluster is not a cross cluster search
+        try (EsqlQueryResponse resp = runQuery("from logs-*,x*:no_such_index* | stats sum (v)")) {
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            List<List<Object>> values = getValuesList(resp);
+            assertThat(values, hasSize(1));
+            assertThat(values.get(0), equalTo(List.of(45L)));
+
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(LOCAL_CLUSTER)));
+            assertThat(executionInfo.isCrossClusterSearch(), is(false));
+            // since this is not a CCS, only the overall took time in the EsqlExecutionInfo matters
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+        }
+
+        // cluster-foo* matches nothing and so should not be present in the EsqlExecutionInfo
+        try (EsqlQueryResponse resp = runQuery("from logs-*,no_such_index*,cluster-a:no_such_index*,cluster-foo*:* | stats sum (v)")) {
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            List<List<Object>> values = getValuesList(resp);
+            assertThat(values, hasSize(1));
+            assertThat(values.get(0), equalTo(List.of(45L)));
+
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER)));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index*"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(0));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(0));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("logs-*,no_such_index*"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
         }
     }
 
     public void testMetadataIndex() {
+        Map<String, Object> testClusterInfo = setupTwoClusters();
+        int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
+        int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards");
+
         try (EsqlQueryResponse resp = runQuery("FROM logs*,*:logs* METADATA _index | stats sum(v) by _index | sort _index")) {
             List<List<Object>> values = getValuesList(resp);
             assertThat(values.get(0), equalTo(List.of(285L, "cluster-a:logs-2")));
             assertThat(values.get(1), equalTo(List.of(45L, "logs-1")));
+
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("logs*"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("logs*"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
         }
     }
 
@@ -138,6 +387,10 @@ void waitForNoInitializingShards(Client client, TimeValue timeout, String... ind
     }
 
     public void testProfile() {
+        Map<String, Object> testClusterInfo = setupTwoClusters();
+        int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
+        int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards");
+
         assumeTrue("pragmas only enabled on snapshot builds", Build.current().isSnapshot());
         // uses shard partitioning as segments can be merged during these queries
         var pragmas = new QueryPragmas(Settings.builder().put(QueryPragmas.DATA_PARTITIONING.getKey(), DataPartitioning.SHARD).build());
@@ -167,6 +420,14 @@ public void testProfile() {
                 List<DriverProfile> drivers = resp.profile().drivers();
                 assertThat(drivers.size(), greaterThanOrEqualTo(2)); // one coordinator and at least one data
                 localOnlyProfiles = drivers.size();
+
+                EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+                assertNotNull(executionInfo);
+                EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+                assertNull(remoteCluster);
+                assertThat(executionInfo.isCrossClusterSearch(), is(false));
+                // since this is not a CCS, only the overall took time in the EsqlExecutionInfo matters
+                assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
             }
         }
         final int remoteOnlyProfiles;
@@ -182,6 +443,23 @@ public void testProfile() {
                 List<DriverProfile> drivers = resp.profile().drivers();
                 assertThat(drivers.size(), greaterThanOrEqualTo(3)); // two coordinators and at least one data
                 remoteOnlyProfiles = drivers.size();
+
+                EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+                assertNotNull(executionInfo);
+                assertThat(executionInfo.isCrossClusterSearch(), is(true));
+                assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+                EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+                assertThat(remoteCluster.getIndexExpression(), equalTo("logs*"));
+                assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+                assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+                assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards));
+                assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards));
+                assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+                assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+                EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+                assertNull(localCluster);
             }
         }
         final int allProfiles;
@@ -197,12 +475,41 @@ public void testProfile() {
                 List<DriverProfile> drivers = resp.profile().drivers();
                 assertThat(drivers.size(), greaterThanOrEqualTo(4)); // two coordinators and at least two data
                 allProfiles = drivers.size();
+
+                EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+                assertNotNull(executionInfo);
+                assertThat(executionInfo.isCrossClusterSearch(), is(true));
+                assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+                EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+                assertThat(remoteCluster.getIndexExpression(), equalTo("logs*"));
+                assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+                assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+                assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards));
+                assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards));
+                assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+                assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+                EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+                assertThat(localCluster.getIndexExpression(), equalTo("logs*"));
+                assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+                assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+                assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+                assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+                assertThat(localCluster.getSkippedShards(), equalTo(0));
+                assertThat(localCluster.getFailedShards(), equalTo(0));
             }
         }
         assertThat(allProfiles, equalTo(localOnlyProfiles + remoteOnlyProfiles - 1));
     }
 
     public void testWarnings() throws Exception {
+        Map<String, Object> testClusterInfo = setupTwoClusters();
+        String localIndex = (String) testClusterInfo.get("local.index");
+        String remoteIndex = (String) testClusterInfo.get("remote.index");
+        int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
+        int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards");
+
         EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
         request.query("FROM logs*,*:logs* | EVAL ip = to_ip(id) | STATS total = sum(v) by ip | LIMIT 10");
         PlainActionFuture<EsqlQueryResponse> future = new PlainActionFuture<>();
@@ -220,6 +527,30 @@ public void testWarnings() throws Exception {
             List<List<Object>> values = getValuesList(resp);
             assertThat(values.get(0).get(0), equalTo(330L));
             assertNull(values.get(0).get(1));
+
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.isCrossClusterSearch(), is(true));
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L));
+
+            EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER);
+            assertThat(remoteCluster.getIndexExpression(), equalTo("logs*"));
+            assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards));
+            assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+            assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+            assertThat(localCluster.getIndexExpression(), equalTo("logs*"));
+            assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(localCluster.getTotalShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(localCluster.getSkippedShards(), equalTo(0));
+            assertThat(localCluster.getFailedShards(), equalTo(0));
+
             latch.countDown();
         }, e -> {
             latch.countDown();
@@ -232,10 +563,58 @@ protected EsqlQueryResponse runQuery(String query) {
         EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
         request.query(query);
         request.pragmas(AbstractEsqlIntegTestCase.randomPragmas());
+        request.profile(true);
         return runQuery(request);
     }
 
     protected EsqlQueryResponse runQuery(EsqlQueryRequest request) {
         return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS);
     }
+
+    Map<String, Object> setupTwoClusters() {
+        String localIndex = "logs-1";
+        int numShardsLocal = randomIntBetween(1, 5);
+        populateLocalIndices(localIndex, numShardsLocal);
+
+        String remoteIndex = "logs-2";
+        int numShardsRemote = randomIntBetween(1, 5);
+        populateRemoteIndices(remoteIndex, numShardsRemote);
+
+        Map<String, Object> clusterInfo = new HashMap<>();
+        clusterInfo.put("local.num_shards", numShardsLocal);
+        clusterInfo.put("local.index", localIndex);
+        clusterInfo.put("remote.num_shards", numShardsRemote);
+        clusterInfo.put("remote.index", remoteIndex);
+        return clusterInfo;
+    }
+
+    void populateLocalIndices(String indexName, int numShards) {
+        Client localClient = client(LOCAL_CLUSTER);
+        assertAcked(
+            localClient.admin()
+                .indices()
+                .prepareCreate(indexName)
+                .setSettings(Settings.builder().put("index.number_of_shards", numShards))
+                .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long")
+        );
+        for (int i = 0; i < 10; i++) {
+            localClient.prepareIndex(indexName).setSource("id", "local-" + i, "tag", "local", "v", i).get();
+        }
+        localClient.admin().indices().prepareRefresh(indexName).get();
+    }
+
+    void populateRemoteIndices(String indexName, int numShards) {
+        Client remoteClient = client(REMOTE_CLUSTER);
+        assertAcked(
+            remoteClient.admin()
+                .indices()
+                .prepareCreate(indexName)
+                .setSettings(Settings.builder().put("index.number_of_shards", numShards))
+                .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long")
+        );
+        for (int i = 0; i < 10; i++) {
+            remoteClient.prepareIndex(indexName).setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get();
+        }
+        remoteClient.admin().indices().prepareRefresh(indexName).get();
+    }
 }
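
The integration tests above pin down the full surface of the new metadata. As a summary of the top-level checks a consumer might make, here is a hedged sketch against the `EsqlExecutionInfo` API introduced below (response acquisition omitted; a completed synchronous response is assumed so `overallTook` is set):

[source,java]
----
// Top-level checks on the execution metadata of a completed response.
EsqlExecutionInfo info = resp.getExecutionInfo();
if (info != null && info.isCrossClusterSearch()) {
    long overallMillis = info.overallTook().millis();
    int skipped = info.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.SKIPPED);
    if (skipped > 0) {
        // some skip_unavailable=true remotes contributed no data
        System.out.println(skipped + " cluster(s) skipped; overall took " + overallMillis + " ms");
    }
}
----
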
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java
new file mode 100644
index 0000000000000..b01aff2a09bd4
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java
@@ -0,0 +1,527 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.common.collect.Iterators;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
+import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
+import org.elasticsearch.core.Predicates;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.rest.action.RestActions;
+import org.elasticsearch.transport.RemoteClusterAware;
+import org.elasticsearch.transport.RemoteClusterService;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.ToXContentFragment;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.function.BiFunction;
+import java.util.function.Predicate;
+
+/**
+ * Holds execution metadata about ES|QL queries for cross-cluster searches in order to display
+ * this information in ES|QL JSON responses.
+ * Patterned after the SearchResponse.Clusters and SearchResponse.Cluster classes.
+ */
+public class EsqlExecutionInfo implements ChunkedToXContentObject, Writeable {
+    // for cross-cluster scenarios where cluster names are shown in API responses, use this string
+    // rather than the empty string (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) that we use internally
+    public static final String LOCAL_CLUSTER_NAME_REPRESENTATION = "(local)";
+
+    public static final ParseField TOTAL_FIELD = new ParseField("total");
+    public static final ParseField SUCCESSFUL_FIELD = new ParseField("successful");
+    public static final ParseField SKIPPED_FIELD = new ParseField("skipped");
+    public static final ParseField RUNNING_FIELD = new ParseField("running");
+    public static final ParseField PARTIAL_FIELD = new ParseField("partial");
+    public static final ParseField FAILED_FIELD = new ParseField("failed");
+    public static final ParseField DETAILS_FIELD = new ParseField("details");
+    public static final ParseField TOOK = new ParseField("took");
+
+    // map key is clusterAlias on the primary querying cluster of a CCS minimize_roundtrips=true query
+    // the Map itself is immutable after construction - all Clusters will be accounted for at the start of the search
+    // updates to a Cluster occur with the swapCluster method that, given the key to the map, transforms an
+    // old Cluster Object to a new Cluster Object with the remapping function.
+    public final Map<String, Cluster> clusterInfo;
+    // not Writeable since it is only needed on the primary CCS coordinator
+    private final transient Predicate<String> skipUnavailablePredicate;
+    private TimeValue overallTook;
+
+    public EsqlExecutionInfo() {
+        this(Predicates.always());  // default all clusters to skip_unavailable=true
+    }
+
+    /**
+     * @param skipUnavailablePredicate provide lookup for whether a given cluster has skip_unavailable set to true or false
+     */
+    public EsqlExecutionInfo(Predicate<String> skipUnavailablePredicate) {
+        this.clusterInfo = ConcurrentCollections.newConcurrentMap();
+        this.skipUnavailablePredicate = skipUnavailablePredicate;
+    }
+
+    /**
+     * For testing use with fromXContent parsing only
+     * @param clusterInfo
+     */
+    EsqlExecutionInfo(ConcurrentMap<String, Cluster> clusterInfo) {
+        this.clusterInfo = clusterInfo;
+        this.skipUnavailablePredicate = Predicates.always();
+    }
+
+    public EsqlExecutionInfo(StreamInput in) throws IOException {
+        this.overallTook = in.readOptionalTimeValue();
+        List<EsqlExecutionInfo.Cluster> clusterList = in.readCollectionAsList(EsqlExecutionInfo.Cluster::new);
+        if (clusterList.isEmpty()) {
+            this.clusterInfo = ConcurrentCollections.newConcurrentMap();
+        } else {
+            Map<String, Cluster> m = ConcurrentCollections.newConcurrentMap();
+            clusterList.forEach(c -> m.put(c.getClusterAlias(), c));
+            this.clusterInfo = m;
+        }
+        this.skipUnavailablePredicate = Predicates.always();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeOptionalTimeValue(overallTook);
+        if (clusterInfo != null) {
+            out.writeCollection(clusterInfo.values().stream().toList());
+        } else {
+            out.writeCollection(Collections.emptyList());
+        }
+    }
+
+    public void overallTook(TimeValue took) {
+        this.overallTook = took;
+    }
+
+    public TimeValue overallTook() {
+        return overallTook;
+    }
+
+    public Set<String> clusterAliases() {
+        return clusterInfo.keySet();
+    }
+
+    /**
+     * @param clusterAlias to lookup skip_unavailable from
+     * @return skip_unavailable setting (true/false)
+     * @throws org.elasticsearch.transport.NoSuchRemoteClusterException if clusterAlias is unknown to this node's RemoteClusterService
+     */
+    public boolean isSkipUnavailable(String clusterAlias) {
+        if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) {
+            return false;
+        }
+        return skipUnavailablePredicate.test(clusterAlias);
+    }
+
+    public boolean isCrossClusterSearch() {
+        return clusterInfo.size() > 1
+            || clusterInfo.size() == 1 && clusterInfo.containsKey(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY) == false;
+    }
+
+    public Cluster getCluster(String clusterAlias) {
+        return clusterInfo.get(clusterAlias);
+    }
+
+    /**
+     * Utility to swap a Cluster object. Guidelines for the remapping function:
+     * <ul>
+     * <li> The remapping function should return a new Cluster object to swap it for
+     * the existing one.</li>
+     * <li> If in the remapping function you decide to abort the swap you must return
+     * the original Cluster object to keep the map unchanged.</li>
+     * <li> Do not return {@code null}. If the remapping function returns {@code null},
+     * the mapping is removed (or remains absent if initially absent).</li>
+     * <li> If the remapping function itself throws an (unchecked) exception, the exception
+     * is rethrown, and the current mapping is left unchanged. Throwing exception therefore
+     * is OK, but it is generally discouraged.</li>
+     * <li> The remapping function may be called multiple times in a CAS fashion underneath,
+     * make sure that is safe to do so.</li>
+     * </ul>
+     * @param clusterAlias key with which the specified value is associated
+     * @param remappingFunction function to swap the oldCluster to a newCluster
+     * @return the new Cluster object
+     */
+    public Cluster swapCluster(String clusterAlias, BiFunction<String, Cluster, Cluster> remappingFunction) {
+        return clusterInfo.compute(clusterAlias, remappingFunction);
+    }
+
+    @Override
+    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
+        if (isCrossClusterSearch() == false || clusterInfo.isEmpty()) {
+            return Iterators.concat();
+        }
+        return Iterators.concat(
+            ChunkedToXContentHelper.startObject(),
+            ChunkedToXContentHelper.field(TOTAL_FIELD.getPreferredName(), clusterInfo.size()),
+            ChunkedToXContentHelper.field(SUCCESSFUL_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.SUCCESSFUL)),
+            ChunkedToXContentHelper.field(RUNNING_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.RUNNING)),
+            ChunkedToXContentHelper.field(SKIPPED_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.SKIPPED)),
+            ChunkedToXContentHelper.field(PARTIAL_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.PARTIAL)),
+            ChunkedToXContentHelper.field(FAILED_FIELD.getPreferredName(), getClusterStateCount(Cluster.Status.FAILED)),
+            ChunkedToXContentHelper.xContentFragmentValuesMapCreateOwnName("details", clusterInfo),
+            ChunkedToXContentHelper.endObject()
+        );
+    }
+
+    /**
+     * @param status the status you want a count of
+     * @return how many clusters are currently in a specific state
+     */
+    public int getClusterStateCount(Cluster.Status status) {
+        assert clusterInfo.size() > 0 : "ClusterMap in EsqlExecutionInfo must not be empty";
+        return (int) clusterInfo.values().stream().filter(cluster -> cluster.getStatus() == status).count();
+    }
+
+    @Override
+    public String toString() {
+        return "EsqlExecutionInfo{" + "overallTook=" + overallTook + ", clusterInfo=" + clusterInfo + '}';
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        EsqlExecutionInfo that = (EsqlExecutionInfo) o;
+        return Objects.equals(clusterInfo, that.clusterInfo) && Objects.equals(overallTook, that.overallTook);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(clusterInfo, overallTook);
+    }
+
+    /**
+     * Represents the search metadata about a particular cluster involved in a cross-cluster search.
+     * The Cluster object can represent either the local cluster or a remote cluster.
+     * For the local cluster, clusterAlias should be specified as RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.
+     * Its XContent is put into the "details" section of the "_clusters" entry in the REST query response.
+     * This is an immutable class, so updates made during the search progress (especially important for async
+     * CCS searches) must be done by replacing the Cluster object with a new one.
+     */
+    public static class Cluster implements ToXContentFragment, Writeable {
+        public static final ParseField INDICES_FIELD = new ParseField("indices");
+        public static final ParseField STATUS_FIELD = new ParseField("status");
+        public static final ParseField TOOK = new ParseField("took");
+
+        private final String clusterAlias;
+        private final String indexExpression; // original index expression from the user for this cluster
+        private final boolean skipUnavailable;
+        private final Cluster.Status status;
+        private final Integer totalShards;
+        private final Integer successfulShards;
+        private final Integer skippedShards;
+        private final Integer failedShards;
+        private final TimeValue took; // search latency for this cluster sub-search
+
+        /**
+         * Marks the status of a Cluster search involved in a Cross-Cluster search.
+         */
+        public enum Status {
+            RUNNING,     // still running
+            SUCCESSFUL,  // all shards completed search
+            PARTIAL,     // only some shards completed the search, partial results from cluster
+            SKIPPED,     // entire cluster was skipped
+            FAILED;      // search was failed due to errors on this cluster
+
+            @Override
+            public String toString() {
+                return this.name().toLowerCase(Locale.ROOT);
+            }
+        }
+
+        public Cluster(String clusterAlias, String indexExpression) {
+            this(clusterAlias, indexExpression, true, Cluster.Status.RUNNING, null, null, null, null, null);
+        }
+
+        /**
+         * Create a Cluster object representing the initial RUNNING state of a Cluster.
+         *
+         * @param clusterAlias clusterAlias as defined in the remote cluster settings or RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY
+         *                     for the local cluster
+         * @param indexExpression the original (not resolved/concrete) indices expression provided for this cluster.
+         * @param skipUnavailable whether this Cluster is marked as skip_unavailable in remote cluster settings
+         */
+        public Cluster(String clusterAlias, String indexExpression, boolean skipUnavailable) {
+            this(clusterAlias, indexExpression, skipUnavailable, Cluster.Status.RUNNING, null, null, null, null, null);
+        }
+
+        /**
+         * Create a Cluster with a new Status other than the default of RUNNING.
+         * @param clusterAlias clusterAlias as defined in the remote cluster settings or RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY
+         *                     for the local cluster
+         * @param indexExpression the original (not resolved/concrete) indices expression provided for this cluster.
+ * @param skipUnavailable whether cluster is marked as skip_unavailable in remote cluster settings + * @param status current status of the search on this Cluster + */ + public Cluster(String clusterAlias, String indexExpression, boolean skipUnavailable, Cluster.Status status) { + this(clusterAlias, indexExpression, skipUnavailable, status, null, null, null, null, null); + } + + public Cluster( + String clusterAlias, + String indexExpression, + boolean skipUnavailable, + Cluster.Status status, + Integer totalShards, + Integer successfulShards, + Integer skippedShards, + Integer failedShards, + TimeValue took + ) { + assert clusterAlias != null : "clusterAlias cannot be null"; + assert indexExpression != null : "indexExpression of Cluster cannot be null"; + assert status != null : "status of Cluster cannot be null"; + this.clusterAlias = clusterAlias; + this.indexExpression = indexExpression; + this.skipUnavailable = skipUnavailable; + this.status = status; + this.totalShards = totalShards; + this.successfulShards = successfulShards; + this.skippedShards = skippedShards; + this.failedShards = failedShards; + this.took = took; + } + + public Cluster(StreamInput in) throws IOException { + this.clusterAlias = in.readString(); + this.indexExpression = in.readString(); + this.status = Cluster.Status.valueOf(in.readString().toUpperCase(Locale.ROOT)); + this.totalShards = in.readOptionalInt(); + this.successfulShards = in.readOptionalInt(); + this.skippedShards = in.readOptionalInt(); + this.failedShards = in.readOptionalInt(); + this.took = in.readOptionalTimeValue(); + this.skipUnavailable = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(clusterAlias); + out.writeString(indexExpression); + out.writeString(status.toString()); + out.writeOptionalInt(totalShards); + out.writeOptionalInt(successfulShards); + out.writeOptionalInt(skippedShards); + out.writeOptionalInt(failedShards); + out.writeOptionalTimeValue(took); + out.writeBoolean(skipUnavailable); + } + + /** + * Since the Cluster object is immutable, use this Builder class to create + * a new Cluster object using the "copyFrom" Cluster passed in and set only + * changed values. + * + * Since the clusterAlias, indexExpression and skipUnavailable fields are + * never changed once set, this Builder provides no setter method for them. + * All other fields can be set and override the value in the "copyFrom" Cluster. + */ + public static class Builder { + private String indexExpression; + private Cluster.Status status; + private Integer totalShards; + private Integer successfulShards; + private Integer skippedShards; + private Integer failedShards; + private TimeValue took; + private final Cluster original; + + public Builder(Cluster copyFrom) { + this.original = copyFrom; + } + + /** + * @return new Cluster object using the new values passed in via setters + * or the values in the "copyFrom" Cluster object set in the + * Builder constructor. + */ + public Cluster build() { + return new Cluster( + original.getClusterAlias(), + indexExpression == null ? original.getIndexExpression() : indexExpression, + original.isSkipUnavailable(), + status != null ? status : original.getStatus(), + totalShards != null ? totalShards : original.getTotalShards(), + successfulShards != null ? successfulShards : original.getSuccessfulShards(), + skippedShards != null ? skippedShards : original.getSkippedShards(), + failedShards != null ? failedShards : original.getFailedShards(), + took != null ? 
took : original.getTook() + ); + } + + public Cluster.Builder setIndexExpression(String indexExpression) { + this.indexExpression = indexExpression; + return this; + } + + public Cluster.Builder setStatus(Cluster.Status status) { + this.status = status; + return this; + } + + public Cluster.Builder setTotalShards(int totalShards) { + this.totalShards = totalShards; + return this; + } + + public Cluster.Builder setSuccessfulShards(int successfulShards) { + this.successfulShards = successfulShards; + return this; + } + + public Cluster.Builder setSkippedShards(int skippedShards) { + this.skippedShards = skippedShards; + return this; + } + + public Cluster.Builder setFailedShards(int failedShards) { + this.failedShards = failedShards; + return this; + } + + public Cluster.Builder setTook(TimeValue took) { + this.took = took; + return this; + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + String name = clusterAlias; + if (clusterAlias.equals("")) { + name = LOCAL_CLUSTER_NAME_REPRESENTATION; + } + builder.startObject(name); + { + builder.field(STATUS_FIELD.getPreferredName(), getStatus().toString()); + builder.field(INDICES_FIELD.getPreferredName(), indexExpression); + if (took != null) { + // TODO: change this to took_nanos and call took.nanos? + builder.field(TOOK.getPreferredName(), took.millis()); + } + if (totalShards != null) { + builder.startObject(RestActions._SHARDS_FIELD.getPreferredName()); + builder.field(RestActions.TOTAL_FIELD.getPreferredName(), totalShards); + if (successfulShards != null) { + builder.field(RestActions.SUCCESSFUL_FIELD.getPreferredName(), successfulShards); + } + if (skippedShards != null) { + builder.field(RestActions.SKIPPED_FIELD.getPreferredName(), skippedShards); + } + if (failedShards != null) { + builder.field(RestActions.FAILED_FIELD.getPreferredName(), failedShards); + } + builder.endObject(); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean isFragment() { + return ToXContentFragment.super.isFragment(); + } + + public String getClusterAlias() { + return clusterAlias; + } + + public String getIndexExpression() { + return indexExpression; + } + + public boolean isSkipUnavailable() { + return skipUnavailable; + } + + public Cluster.Status getStatus() { + return status; + } + + public TimeValue getTook() { + return took; + } + + public Integer getTotalShards() { + return totalShards; + } + + public Integer getSuccessfulShards() { + return successfulShards; + } + + public Integer getSkippedShards() { + return skippedShards; + } + + public Integer getFailedShards() { + return failedShards; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Cluster cluster = (Cluster) o; + return Objects.equals(clusterAlias, cluster.clusterAlias) + && Objects.equals(indexExpression, cluster.indexExpression) + && status == cluster.status + && Objects.equals(totalShards, cluster.totalShards) + && Objects.equals(successfulShards, cluster.successfulShards) + && Objects.equals(skippedShards, cluster.skippedShards) + && Objects.equals(failedShards, cluster.failedShards) + && Objects.equals(took, cluster.took); + } + + @Override + public int hashCode() { + return Objects.hash(clusterAlias, indexExpression, status, totalShards, successfulShards, skippedShards, failedShards, took); + } + + @Override + public String toString() { + return "Cluster{" + + "alias='" + + clusterAlias + + 
'\'' + + ", status=" + + status + + ", totalShards=" + + totalShards + + ", successfulShards=" + + successfulShards + + ", skippedShards=" + + skippedShards + + ", failedShards=" + + failedShards + + ", took=" + + took + + ", indexExpression='" + + indexExpression + + '\'' + + ", skipUnavailable=" + + skipUnavailable + + '}'; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index 81fbda2ad6fee..146a88128da35 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -53,6 +53,7 @@ public class EsqlQueryResponse extends org.elasticsearch.xpack.core.esql.action. private final boolean isRunning; // True if this response is as a result of an async query request private final boolean isAsync; + private final EsqlExecutionInfo executionInfo; public EsqlQueryResponse( List columns, @@ -61,7 +62,8 @@ public EsqlQueryResponse( boolean columnar, @Nullable String asyncExecutionId, boolean isRunning, - boolean isAsync + boolean isAsync, + EsqlExecutionInfo executionInfo ) { this.columns = columns; this.pages = pages; @@ -70,10 +72,18 @@ public EsqlQueryResponse( this.asyncExecutionId = asyncExecutionId; this.isRunning = isRunning; this.isAsync = isAsync; + this.executionInfo = executionInfo; } - public EsqlQueryResponse(List columns, List pages, @Nullable Profile profile, boolean columnar, boolean isAsync) { - this(columns, pages, profile, columnar, null, false, isAsync); + public EsqlQueryResponse( + List columns, + List pages, + @Nullable Profile profile, + boolean columnar, + boolean isAsync, + EsqlExecutionInfo executionInfo + ) { + this(columns, pages, profile, columnar, null, false, isAsync, executionInfo); } /** @@ -103,7 +113,11 @@ static EsqlQueryResponse deserialize(BlockStreamInput in) throws IOException { profile = in.readOptionalWriteable(Profile::new); } boolean columnar = in.readBoolean(); - return new EsqlQueryResponse(columns, pages, profile, columnar, asyncExecutionId, isRunning, isAsync); + EsqlExecutionInfo executionInfo = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + executionInfo = in.readOptionalWriteable(EsqlExecutionInfo::new); + } + return new EsqlQueryResponse(columns, pages, profile, columnar, asyncExecutionId, isRunning, isAsync, executionInfo); } @Override @@ -119,6 +133,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(profile); } out.writeBoolean(columnar); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + out.writeOptionalWriteable(executionInfo); + } } public List columns() { @@ -164,6 +181,10 @@ public boolean isAsync() { return isRunning; } + public EsqlExecutionInfo getExecutionInfo() { + return executionInfo; + } + private Iterator asyncPropertiesOrEmpty() { if (isAsync) { return ChunkedToXContentHelper.singleChunk((builder, params) -> { @@ -182,6 +203,17 @@ private Iterator asyncPropertiesOrEmpty() { public Iterator toXContentChunked(ToXContent.Params params) { boolean dropNullColumns = params.paramAsBoolean(DROP_NULL_COLUMNS_OPTION, false); boolean[] nullColumns = dropNullColumns ? 
nullColumns() : null;
+
+        Iterator<ToXContent> tookTime;
+        if (executionInfo != null && executionInfo.overallTook() != null) {
+            tookTime = ChunkedToXContentHelper.singleChunk((builder, p) -> {
+                builder.field("took", executionInfo.overallTook().millis());
+                return builder;
+            });
+        } else {
+            tookTime = Collections.emptyIterator();
+        }
+
         Iterator<? extends ToXContent> columnHeadings = dropNullColumns
             ? Iterators.concat(
                 ResponseXContentUtils.allColumns(columns, "all_columns"),
@@ -192,11 +224,16 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params
         Iterator<ToXContent> profileRender = profile == null ? List.<ToXContent>of().iterator() : ChunkedToXContentHelper.field("profile", profile, params);
+        Iterator<ToXContent> executionInfoRender = executionInfo == null || executionInfo.isCrossClusterSearch() == false
+            ? List.<ToXContent>of().iterator()
+            : ChunkedToXContentHelper.field("_clusters", executionInfo, params);
         return Iterators.concat(
             ChunkedToXContentHelper.startObject(),
             asyncPropertiesOrEmpty(),
+            tookTime,
             columnHeadings,
             ChunkedToXContentHelper.array("values", valuesIt),
+            executionInfoRender,
             profileRender,
             ChunkedToXContentHelper.endObject()
         );
@@ -234,7 +271,8 @@ public boolean equals(Object o) {
             && Objects.equals(isRunning, that.isRunning)
             && columnar == that.columnar
             && Iterators.equals(values(), that.values(), (row1, row2) -> Iterators.equals(row1, row2, Objects::equals))
-            && Objects.equals(profile, that.profile);
+            && Objects.equals(profile, that.profile)
+            && Objects.equals(executionInfo, that.executionInfo);
     }

     @Override
@@ -244,7 +282,8 @@ public int hashCode() {
             isRunning,
             columns,
             Iterators.hashCode(values(), row -> Iterators.hashCode(row, Objects::hashCode)),
-            columnar
+            columnar,
+            executionInfo
         );
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java
index 917355b2d88b5..b12cf4eb354bf 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java
@@ -33,6 +33,6 @@ public EsqlQueryTask(

     @Override
     public EsqlQueryResponse getCurrentResult() {
-        return new EsqlQueryResponse(List.of(), List.of(), null, false, getExecutionId().getEncoded(), true, true);
+        return new EsqlQueryResponse(List.of(), List.of(), null, false, getExecutionId().getEncoded(), true, true, null);
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java
index 5ce1ca25c5913..1c88fe6f45d81 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java
@@ -153,8 +153,7 @@ private RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws IOExce
                 releasable
             );
         }
-        long tookNanos = stopWatch.stop().getNanos();
-        restResponse.addHeader(HEADER_NAME_TOOK_NANOS, Long.toString(tookNanos));
+        restResponse.addHeader(HEADER_NAME_TOOK_NANOS, Long.toString(getTook(esqlResponse, TimeUnit.NANOSECONDS)));
         success = true;
         return restResponse;
     } finally {
@@ -164,6 +163,25 @@ private RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws IOExce
         }
     }
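The `getTook` helper added in the next hunk exists because a stopwatch started by the REST layer only measures the current HTTP request, while an async query's overall took time spans several requests. The fallback logic in isolation — a sketch with hypothetical names, not the listener's actual signature:

[source,java]
----
import java.util.concurrent.TimeUnit;

public class TookDemo {
    // Prefer a took value carried inside the response (it survives across async
    // GET calls); fall back to the locally measured elapsed time otherwise.
    static long tookNanos(Long responseTookMillis, long stopwatchNanos) {
        if (responseTookMillis != null) {
            return TimeUnit.MILLISECONDS.toNanos(responseTookMillis);
        }
        return stopwatchNanos;
    }

    public static void main(String[] args) {
        System.out.println(tookNanos(42L, 1_000_000L));  // 42000000 (from the response)
        System.out.println(tookNanos(null, 1_000_000L)); // 1000000 (stopwatch fallback)
    }
}
----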
+    /**
+     * If the {@link EsqlQueryResponse} has overallTook time present, use that, as it persists across
+     * REST calls, so it works properly with long-running async searches.
+     * @param esqlResponse
+     * @return took time in nanos either from the {@link EsqlQueryResponse} or the stopWatch in this object
+     */
+    private long getTook(EsqlQueryResponse esqlResponse, TimeUnit timeUnit) {
+        assert timeUnit == TimeUnit.NANOSECONDS || timeUnit == TimeUnit.MILLISECONDS : "Unsupported TimeUnit: " + timeUnit;
+        TimeValue tookTime = stopWatch.stop();
+        if (esqlResponse != null && esqlResponse.getExecutionInfo() != null && esqlResponse.getExecutionInfo().overallTook() != null) {
+            tookTime = esqlResponse.getExecutionInfo().overallTook();
+        }
+        if (timeUnit == TimeUnit.NANOSECONDS) {
+            return tookTime.nanos();
+        } else {
+            return tookTime.millis();
+        }
+    }
+
     /**
      * Log internal server errors all the time and log queries if debug is enabled.
      */
@@ -181,11 +199,11 @@ public ActionListener<EsqlQueryResponse> wrapWithLogging() {
             LOGGER.debug(
                 "Finished execution of ESQL query.\nQuery string: [{}]\nExecution time: [{}]ms",
                 esqlQuery,
-                stopWatch.stop().getMillis()
+                getTook(r, TimeUnit.MILLISECONDS)
             );
         }, ex -> {
             // In case of failure, stop the time manually before sending out the response.
-            long timeMillis = stopWatch.stop().getMillis();
+            long timeMillis = getTook(null, TimeUnit.MILLISECONDS);
             LOGGER.debug("Failed execution of ESQL query.\nQuery string: [{}]\nExecution time: [{}]ms", esqlQuery, timeMillis);
             listener.onFailure(ex);
         });
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java
index 441fd91ee6b35..7d8e0cd736445 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java
@@ -8,7 +8,9 @@
 package org.elasticsearch.xpack.esql.execution;

 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.indices.IndicesExpressionGrouper;
 import org.elasticsearch.telemetry.metric.MeterRegistry;
+import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo;
 import org.elasticsearch.xpack.esql.action.EsqlQueryRequest;
 import org.elasticsearch.xpack.esql.analysis.PreAnalyzer;
 import org.elasticsearch.xpack.esql.analysis.Verifier;
@@ -56,6 +58,8 @@ public void esql(
         String sessionId,
         Configuration cfg,
         EnrichPolicyResolver enrichPolicyResolver,
+        EsqlExecutionInfo executionInfo,
+        IndicesExpressionGrouper indicesExpressionGrouper,
         BiConsumer<PhysicalPlan, ActionListener<Result>> runPhase,
         ActionListener<Result> listener
     ) {
@@ -70,11 +74,12 @@ public void esql(
             new LogicalPlanOptimizer(new LogicalOptimizerContext(cfg)),
             mapper,
             verifier,
-            planningMetrics
+            planningMetrics,
+            indicesExpressionGrouper
         );
         QueryMetric clientId = QueryMetric.fromString("rest");
         metrics.total(clientId);
-        session.execute(request, runPhase, wrap(x -> {
+        session.execute(request, executionInfo, runPhase, wrap(x -> {
             planningMetricsManager.publish(planningMetrics, true);
             listener.onResponse(x);
         }, ex -> {
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java
index 725b6412afc77..371aa1b632309 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java
@@ -8,17 +8,25 @@

 import org.elasticsearch.core.Nullable;

+import java.util.Collections;
 import java.util.Objects;
+import java.util.Set;

 public final class IndexResolution {
-    public static IndexResolution valid(EsIndex index) {
+
+    public static IndexResolution valid(EsIndex index, Set<String> unavailableClusters) {
         Objects.requireNonNull(index, "index must not be null if it was found");
-        return new IndexResolution(index, null);
+        Objects.requireNonNull(unavailableClusters, "unavailableClusters must not be null");
+        return new IndexResolution(index, null, unavailableClusters);
+    }
+
+    public static IndexResolution valid(EsIndex index) {
+        return valid(index, Collections.emptySet());
     }

     public static IndexResolution invalid(String invalid) {
         Objects.requireNonNull(invalid, "invalid must not be null to signal that the index is invalid");
-        return new IndexResolution(null, invalid);
+        return new IndexResolution(null, invalid, Collections.emptySet());
     }

     public static IndexResolution notFound(String name) {
@@ -30,9 +38,13 @@ public static IndexResolution notFound(String name) {
     @Nullable
     private final String invalid;

-    private IndexResolution(EsIndex index, @Nullable String invalid) {
+    // remote clusters included in the user's index expression that could not be connected to
+    private final Set<String> unavailableClusters;
+
+    private IndexResolution(EsIndex index, @Nullable String invalid, Set<String> unavailableClusters) {
         this.index = index;
         this.invalid = invalid;
+        this.unavailableClusters = unavailableClusters;
     }

     public boolean matches(String indexName) {
@@ -58,18 +70,24 @@ public boolean isValid() {
         return invalid == null;
     }

+    public Set<String> getUnavailableClusters() {
+        return unavailableClusters;
+    }
+
     @Override
     public boolean equals(Object obj) {
         if (obj == null || obj.getClass() != getClass()) {
             return false;
         }
         IndexResolution other = (IndexResolution) obj;
-        return Objects.equals(index, other.index) && Objects.equals(invalid, other.invalid);
+        return Objects.equals(index, other.index)
+            && Objects.equals(invalid, other.invalid)
+            && Objects.equals(unavailableClusters, other.unavailableClusters);
     }

     @Override
     public int hashCode() {
-        return Objects.hash(index, invalid);
+        return Objects.hash(index, invalid, unavailableClusters);
     }

     @Override
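The `unavailableClusters` set gives the resolution result a place to record remotes that could not be contacted during field-caps resolution, so they can later be marked as skipped rather than failing the whole query. A standalone sketch of that flow, using illustrative types rather than the ES|QL classes:

[source,java]
----
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ResolutionDemo {
    enum Status { RUNNING, SKIPPED }

    // A resolution result that remembers which remotes could not be reached.
    record Resolution(Set<String> unavailableClusters) {}

    public static void main(String[] args) {
        Map<String, Status> clusters = new ConcurrentHashMap<>(
            Map.of("(local)", Status.RUNNING, "remote1", Status.RUNNING, "remote2", Status.RUNNING)
        );
        Resolution resolution = new Resolution(Set.of("remote2")); // remote2 was unreachable

        // Mark every unreachable remote as SKIPPED before the query runs.
        for (String alias : resolution.unavailableClusters()) {
            clusters.computeIfPresent(alias, (k, v) -> Status.SKIPPED);
        }
        System.out.println(clusters); // remote2 -> SKIPPED, the others stay RUNNING
    }
}
----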
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java
index 01d50d505f7f2..d8fc4da070767 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java
@@ -12,15 +12,20 @@
 import org.elasticsearch.compute.operator.DriverProfile;
 import org.elasticsearch.compute.operator.FailureCollector;
 import org.elasticsearch.compute.operator.ResponseHeadersCollector;
+import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
 import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.transport.RemoteClusterAware;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo;

 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;

 /**
@@ -29,6 +34,7 @@
  * 2. Collects driver profiles from sub tasks.
  * 3. Collects response headers from sub tasks, specifically warnings emitted during compute
  * 4. Collects failures and returns the most appropriate exception to the caller.
+ * 5. Updates {@link EsqlExecutionInfo} for display in the response for cross-cluster searches
  */
 final class ComputeListener implements Releasable {
     private static final Logger LOGGER = LogManager.getLogger(ComputeService.class);
@@ -40,19 +46,132 @@ final class ComputeListener implements Releasable {
     private final TransportService transportService;
     private final List<DriverProfile> collectedProfiles;
     private final ResponseHeadersCollector responseHeaders;
+    private final EsqlExecutionInfo esqlExecutionInfo;
+    private final long queryStartTimeNanos;
+    // clusterAlias indicating where this ComputeListener is running
+    // used by the top level ComputeListener in ComputeService on both local and remote clusters
+    private final String whereRunning;

-    ComputeListener(TransportService transportService, CancellableTask task, ActionListener<ComputeResponse> delegate) {
+    /**
+     * Create a ComputeListener that does not need to gather any metadata in EsqlExecutionInfo
+     * (currently that's the ComputeListener in DataNodeRequestHandler).
+     */
+    public static ComputeListener create(
+        TransportService transportService,
+        CancellableTask task,
+        ActionListener<ComputeResponse> delegate
+    ) {
+        return new ComputeListener(transportService, task, null, null, -1, delegate);
+    }
+
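Both factory methods funnel into one private constructor whose core mechanic is reference counting: every sub-computation acquires a ref, and the final response fires only after the last one completes. A rough sketch of that pattern using plain JDK primitives — the real class uses ES's RefCountingListener, and all names here are invented:

[source,java]
----
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Phaser;
import java.util.function.Consumer;

public class RefCountDemo {
    private final Phaser refs = new Phaser(1); // 1 = the "root" ref held until close()
    private final List<String> profiles = new CopyOnWriteArrayList<>();
    private final Consumer<List<String>> onDone;

    RefCountDemo(Consumer<List<String>> onDone) {
        this.onDone = onDone;
    }

    // Each sub-task gets a completion callback; results are collected as they arrive.
    Runnable acquire(String profile) {
        refs.register();
        return () -> {
            profiles.add(profile);
            refs.arriveAndDeregister();
        };
    }

    // Dropping the root ref lets the final callback fire once all sub-tasks finish.
    void close() {
        refs.arriveAndDeregister();
        refs.awaitAdvance(0); // block until every registered party has arrived (demo only)
        onDone.accept(profiles);
    }

    public static void main(String[] args) {
        RefCountDemo demo = new RefCountDemo(p -> System.out.println("all done: " + p));
        Runnable a = demo.acquire("driver-1");
        Runnable b = demo.acquire("driver-2");
        a.run();
        b.run();
        demo.close(); // prints: all done: [driver-1, driver-2]
    }
}
----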
+    /**
+     * Create a ComputeListener that gathers metadata in EsqlExecutionInfo
+     * (currently that's the top level ComputeListener in ComputeService).
+     * @param clusterAlias the clusterAlias where this ComputeListener is running. For the querying cluster, use
+     *                     RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY. For remote clusters that are part of a CCS,
+     *                     the remote cluster is given its clusterAlias in the request sent to it, so that should be
+     *                     passed in here. This gives context to the ComputeListener as to where this listener is running
+     *                     and thus how it should behave with respect to the {@link EsqlExecutionInfo} metadata it gathers.
+     * @param transportService
+     * @param task
+     * @param executionInfo {@link EsqlExecutionInfo} to capture execution metadata
+     * @param queryStartTimeNanos Start time of the ES|QL query (stored in {@link org.elasticsearch.xpack.esql.session.Configuration})
+     * @param delegate
+     */
+    public static ComputeListener create(
+        String clusterAlias,
+        TransportService transportService,
+        CancellableTask task,
+        EsqlExecutionInfo executionInfo,
+        long queryStartTimeNanos,
+        ActionListener<ComputeResponse> delegate
+    ) {
+        return new ComputeListener(transportService, task, clusterAlias, executionInfo, queryStartTimeNanos, delegate);
+    }
+
+    private ComputeListener(
+        TransportService transportService,
+        CancellableTask task,
+        String clusterAlias,
+        EsqlExecutionInfo executionInfo,
+        long queryStartTimeNanos,
+        ActionListener<ComputeResponse> delegate
+    ) {
         this.transportService = transportService;
         this.task = task;
         this.responseHeaders = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext());
         this.collectedProfiles = Collections.synchronizedList(new ArrayList<>());
+        this.esqlExecutionInfo = executionInfo;
+        this.queryStartTimeNanos = queryStartTimeNanos;
+        this.whereRunning = clusterAlias;
+        // for the DataNodeHandler ComputeListener, clusterAlias and executionInfo will be null
+        // for the top level ComputeListener in ComputeService both will be non-null
+        assert (clusterAlias == null && executionInfo == null) || (clusterAlias != null && executionInfo != null)
+            : "clusterAlias and executionInfo must both be null or both non-null";
+
+        // listener that executes after all the sub-listeners refs (created via acquireCompute) have completed
         this.refs = new RefCountingListener(1, ActionListener.wrap(ignored -> {
             responseHeaders.finish();
-            var result = new ComputeResponse(collectedProfiles.isEmpty() ? List.of() : collectedProfiles.stream().toList());
+            ComputeResponse result;
+
+            if (runningOnRemoteCluster()) {
+                // for remote executions - this ComputeResponse is created on the remote cluster/node and will be serialized and
+                // received by the acquireCompute method callback on the coordinating cluster
+                EsqlExecutionInfo.Cluster cluster = esqlExecutionInfo.getCluster(clusterAlias);
+                result = new ComputeResponse(
+                    collectedProfiles.isEmpty() ? List.of() : collectedProfiles.stream().toList(),
+                    cluster.getTook(),
+                    cluster.getTotalShards(),
+                    cluster.getSuccessfulShards(),
+                    cluster.getSkippedShards(),
+                    cluster.getFailedShards()
+                );
+            } else {
+                result = new ComputeResponse(collectedProfiles.isEmpty() ? List.of() : collectedProfiles.stream().toList());
+                if (coordinatingClusterIsSearchedInCCS()) {
+                    // mark local cluster as finished once the coordinator and all data nodes have finished processing
+                    executionInfo.swapCluster(
+                        RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
+                        (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL).build()
+                    );
+                }
+            }
             delegate.onResponse(result);
         }, e -> delegate.onFailure(failureCollector.getFailure())));
     }
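Note how the remote branch of the constructor above copies the cluster's metadata into the ComputeResponse: the remote's execution info never crosses the wire itself, only this response does, and the coordinator re-records it under the remote's alias. A condensed sketch of that round trip with plain records (not the actual wire classes):

[source,java]
----
import java.util.HashMap;
import java.util.Map;

public class RoundTripDemo {
    // What the remote cluster tracked locally while executing its part of the query.
    record ClusterMeta(long tookMillis, int totalShards, int skippedShards) {}
    // What actually crosses the wire back to the coordinator.
    record Response(long tookMillis, int totalShards, int skippedShards) {}

    static Response onRemoteFinished(ClusterMeta local) {
        // remote side: fold its local bookkeeping into the outgoing response
        return new Response(local.tookMillis(), local.totalShards(), local.skippedShards());
    }

    public static void main(String[] args) {
        Map<String, ClusterMeta> coordinatorView = new HashMap<>();
        Response resp = onRemoteFinished(new ClusterMeta(38, 4, 0));
        // coordinator side: record what the remote reported under its alias
        coordinatorView.put("cluster_one", new ClusterMeta(resp.tookMillis(), resp.totalShards(), resp.skippedShards()));
        System.out.println(coordinatorView); // {cluster_one=ClusterMeta[tookMillis=38, totalShards=4, skippedShards=0]}
    }
}
----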
+    /**
+     * @return true if the "local" querying/coordinator cluster is being searched in a cross-cluster search
+     */
+    private boolean coordinatingClusterIsSearchedInCCS() {
+        return esqlExecutionInfo != null
+            && esqlExecutionInfo.isCrossClusterSearch()
+            && esqlExecutionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) != null;
+    }
+
+    /**
+     * @return true if this Listener is running on a remote cluster (i.e., not the querying cluster)
+     */
+    private boolean runningOnRemoteCluster() {
+        return whereRunning != null && whereRunning.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false;
+    }
+
+    /**
+     * @return true if the listener is in a context where the took time needs to be recorded into the EsqlExecutionInfo
+     */
+    private boolean shouldRecordTookTime() {
+        return runningOnRemoteCluster() || coordinatingClusterIsSearchedInCCS();
+    }
+
+    /**
+     * @param computeClusterAlias the clusterAlias passed to the acquireCompute method
+     * @return true if this listener is waiting for a remote response in a CCS search
+     */
+    private boolean isCCSListener(String computeClusterAlias) {
+        return RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(whereRunning)
+            && computeClusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false;
+    }
+
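In the hunk that follows, acquireCompute keeps the largest took value reported for a cluster, because data-node and coordinator callbacks for the same cluster complete in arbitrary order. The merge rule in isolation — hypothetical names, same compute-and-compare idea:

[source,java]
----
import java.util.concurrent.ConcurrentHashMap;

public class TookMergeDemo {
    record Meta(Long tookNanos) {}

    static final ConcurrentHashMap<String, Meta> CLUSTERS = new ConcurrentHashMap<>();

    // Only replace the stored took if the newly reported one is larger, so the final
    // value reflects the slowest sub-task, regardless of arrival order.
    static void recordTook(String alias, long tookNanos) {
        CLUSTERS.compute(alias, (k, v) -> {
            if (v == null || v.tookNanos() == null || v.tookNanos() < tookNanos) {
                return new Meta(tookNanos);
            }
            return v;
        });
    }

    public static void main(String[] args) {
        recordTook("(local)", 5_000_000L);
        recordTook("(local)", 3_000_000L); // arrives later but was faster: ignored
        System.out.println(CLUSTERS.get("(local)").tookNanos()); // 5000000
    }
}
----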
     /**
      * Acquires a new listener that doesn't collect result
      */
@@ -71,19 +190,60 @@ ActionListener<Void> acquireAvoid() {
     }

     /**
-     * Acquires a new listener that collects compute result. This listener will also collects warnings emitted during compute
+     * Acquires a new listener that collects compute result. This listener will also collect warnings emitted during compute
+     * @param computeClusterAlias The cluster alias where the compute is happening. Used when metadata needs to be gathered
+     *                            into the {@link EsqlExecutionInfo} Cluster objects. Callers that do not require execution
+     *                            info to be gathered (namely, the DataNodeRequestHandler ComputeListener) should pass in null.
      */
-    ActionListener<ComputeResponse> acquireCompute() {
+    ActionListener<ComputeResponse> acquireCompute(@Nullable String computeClusterAlias) {
+        assert computeClusterAlias == null || (esqlExecutionInfo != null && queryStartTimeNanos > 0)
+            : "When clusterAlias is provided to acquireCompute, executionInfo must be non-null and queryStartTimeNanos must be positive";
+
         return acquireAvoid().map(resp -> {
             responseHeaders.collect();
             var profiles = resp.getProfiles();
             if (profiles != null && profiles.isEmpty() == false) {
                 collectedProfiles.addAll(profiles);
             }
+            if (computeClusterAlias == null) {
+                return null;
+            }
+            if (isCCSListener(computeClusterAlias)) {
+                // this is the callback for the listener to the CCS compute
+                esqlExecutionInfo.swapCluster(
+                    computeClusterAlias,
+                    (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v)
+                        // for now ESQL doesn't return partial results, so set status to SUCCESSFUL
+                        .setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)
+                        .setTook(resp.getTook())
+                        .setTotalShards(resp.getTotalShards())
+                        .setSuccessfulShards(resp.getSuccessfulShards())
+                        .setSkippedShards(resp.getSkippedShards())
+                        .setFailedShards(resp.getFailedShards())
+                        .build()
+                );
+            } else if (shouldRecordTookTime()) {
+                // handler for this cluster's data node and coordinator completion (runs on "local" and remote clusters)
+                TimeValue tookTime = new TimeValue(System.nanoTime() - queryStartTimeNanos, TimeUnit.NANOSECONDS);
+                esqlExecutionInfo.swapCluster(computeClusterAlias, (k, v) -> {
+                    if (v.getTook() == null || v.getTook().nanos() < tookTime.nanos()) {
+                        return new EsqlExecutionInfo.Cluster.Builder(v).setTook(tookTime).build();
+                    } else {
+                        return v;
+                    }
+                });
+            }
             return null;
         });
     }

+    /**
+     * Use this method when no execution metadata needs to be added to {@link EsqlExecutionInfo}
+     */
+    ActionListener<ComputeResponse> acquireCompute() {
+        return acquireCompute(null);
+    }
+
     @Override
     public void close() {
         refs.close();
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java
index a4235d85cf832..308192704fe0e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.compute.operator.DriverProfile;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.transport.TransportResponse;

 import java.io.IOException;
@@ -22,8 +23,31 @@
 final class ComputeResponse extends TransportResponse {
     private final List<DriverProfile> profiles;

+    // for use with ClusterComputeRequests (cross-cluster searches)
+    private final TimeValue took; // overall took time for a specific cluster in a cross-cluster search
+    public final int totalShards;
+    public final int successfulShards;
+    public final int skippedShards;
+    public final int failedShards;
+
     ComputeResponse(List<DriverProfile> profiles) {
+        this(profiles, null, null, null, null, null);
+    }
+
+    ComputeResponse(
+        List<DriverProfile> profiles,
+        TimeValue took,
+        Integer totalShards,
+        Integer successfulShards,
+        Integer skippedShards,
+        Integer failedShards
+    ) {
         this.profiles = profiles;
+        this.took = took;
+        this.totalShards = totalShards == null ? 0 : totalShards.intValue();
+        this.successfulShards = successfulShards == null ? 0 : successfulShards.intValue();
+        this.skippedShards = skippedShards == null ?
0 : skippedShards.intValue(); + this.failedShards = failedShards == null ? 0 : failedShards.intValue(); } ComputeResponse(StreamInput in) throws IOException { @@ -37,6 +61,19 @@ final class ComputeResponse extends TransportResponse { } else { profiles = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + this.took = in.readOptionalTimeValue(); + this.totalShards = in.readVInt(); + this.successfulShards = in.readVInt(); + this.skippedShards = in.readVInt(); + this.failedShards = in.readVInt(); + } else { + this.took = new TimeValue(0L); + this.totalShards = 0; + this.successfulShards = 0; + this.skippedShards = 0; + this.failedShards = 0; + } } @Override @@ -49,9 +86,36 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(profiles); } } + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + out.writeOptionalTimeValue(took); + out.writeVInt(totalShards); + out.writeVInt(successfulShards); + out.writeVInt(skippedShards); + out.writeVInt(failedShards); + } } public List getProfiles() { return profiles; } + + public TimeValue getTook() { + return took; + } + + public int getTotalShards() { + return totalShards; + } + + public int getSuccessfulShards() { + return successfulShards; + } + + public int getSkippedShards() { + return skippedShards; + } + + public int getFailedShards() { + return failedShards; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index fa8a5693c59bb..d1f2007af2757 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -32,6 +32,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.QueryBuilder; @@ -56,6 +57,7 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.action.EsqlSearchShardsAction; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; @@ -71,12 +73,14 @@ import org.elasticsearch.xpack.esql.session.Result; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; @@ -130,6 +134,7 @@ public void execute( CancellableTask rootTask, PhysicalPlan physicalPlan, Configuration configuration, + EsqlExecutionInfo execInfo, ActionListener listener ) { Tuple coordinatorAndDataNodePlan = PlannerUtils.breakPlanBetweenCoordinatorAndDataNode( @@ -167,10 +172,13 @@ public void execute( null ); try ( - var computeListener = new ComputeListener( + var computeListener = ComputeListener.create( + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, 
transportService, rootTask, - listener.map(r -> new Result(physicalPlan.output(), collectedPages, r.getProfiles())) + execInfo, + configuration.getQueryStartTimeNanos(), + listener.map(r -> new Result(physicalPlan.output(), collectedPages, r.getProfiles(), execInfo)) ) ) { runCompute(rootTask, computeContext, coordinatorPlan, computeListener.acquireCompute()); @@ -192,13 +200,16 @@ public void execute( queryPragmas.exchangeBufferSize(), transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); + long start = configuration.getQueryStartTimeNanos(); + String local = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; try ( Releasable ignored = exchangeSource.addEmptySink(); - var computeListener = new ComputeListener( - transportService, - rootTask, - listener.map(r -> new Result(physicalPlan.output(), collectedPages, r.getProfiles())) - ) + // this is the top level ComputeListener called once at the end (e.g., once all clusters have finished for a CCS) + var computeListener = ComputeListener.create(local, transportService, rootTask, execInfo, start, listener.map(r -> { + long tookTimeNanos = System.nanoTime() - configuration.getQueryStartTimeNanos(); + execInfo.overallTook(new TimeValue(tookTimeNanos, TimeUnit.NANOSECONDS)); + return new Result(physicalPlan.output(), collectedPages, r.getProfiles(), execInfo); + })) ) { // run compute on the coordinator exchangeSource.addCompletionListener(computeListener.acquireAvoid()); @@ -206,7 +217,7 @@ public void execute( rootTask, new ComputeContext(sessionId, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, exchangeSource, null), coordinatorPlan, - computeListener.acquireCompute() + computeListener.acquireCompute(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) ); // starts computes on data nodes on the main cluster if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { @@ -219,6 +230,7 @@ public void execute( Set.of(localConcreteIndices.indices()), localOriginalIndices, exchangeSource, + execInfo, computeListener ); } @@ -266,6 +278,7 @@ private void startComputeOnDataNodes( Set concreteIndices, OriginalIndices originalIndices, ExchangeSourceHandler exchangeSource, + EsqlExecutionInfo executionInfo, ComputeListener computeListener ) { var planWithReducer = configuration.pragmas().nodeLevelReduction() == false @@ -281,11 +294,22 @@ private void startComputeOnDataNodes( // but it would be better to have a proper impl. 
         QueryBuilder requestFilter = PlannerUtils.requestFilter(planWithReducer, x -> true);
         var lookupListener = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink());
-        lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodes -> {
+        // SearchShards API can_match is done in lookupDataNodes
+        lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodeResult -> {
             try (RefCountingListener refs = new RefCountingListener(lookupListener)) {
+                // update ExecutionInfo with shard counts (total and skipped)
+                executionInfo.swapCluster(
+                    clusterAlias,
+                    (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTotalShards(dataNodeResult.totalShards())
+                        .setSuccessfulShards(dataNodeResult.totalShards())
+                        .setSkippedShards(dataNodeResult.skippedShards())
+                        .setFailedShards(0)
+                        .build()
+                );
+
                 // For each target node, first open a remote exchange on the remote node, then link the exchange source to
                 // the new remote exchange sink, and initialize the computation on the target node via data-node-request.
-                for (DataNode node : dataNodes) {
+                for (DataNode node : dataNodeResult.dataNodes()) {
                     var queryPragmas = configuration.pragmas();
                     ExchangeService.openExchange(
                         transportService,
@@ -296,7 +320,8 @@ private void startComputeOnDataNodes(
                         refs.acquire().delegateFailureAndWrap((l, unused) -> {
                             var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection);
                             exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients());
-                            var dataNodeListener = ActionListener.runBefore(computeListener.acquireCompute(), () -> l.onResponse(null));
+                            ActionListener<ComputeResponse> computeResponseListener = computeListener.acquireCompute(clusterAlias);
+                            var dataNodeListener = ActionListener.runBefore(computeResponseListener, () -> l.onResponse(null));
                             transportService.sendChildRequest(
                                 node.connection,
                                 DATA_ACTION_NAME,
@@ -345,7 +370,10 @@ private void startComputeOnRemoteClusters(
                     exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients());
                     var remotePlan = new RemoteClusterPlan(plan, cluster.concreteIndices, cluster.originalIndices);
                     var clusterRequest = new ClusterComputeRequest(cluster.clusterAlias, sessionId, configuration, remotePlan);
-                    var clusterListener = ActionListener.runBefore(computeListener.acquireCompute(), () -> l.onResponse(null));
+                    var clusterListener = ActionListener.runBefore(
+                        computeListener.acquireCompute(cluster.clusterAlias()),
+                        () -> l.onResponse(null)
+                    );
                     transportService.sendChildRequest(
                         cluster.connection,
                         CLUSTER_ACTION_NAME,
@@ -412,7 +440,8 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan,
             if (context.configuration.profile()) {
                 return new ComputeResponse(drivers.stream().map(Driver::profile).toList());
            } else {
-                return new ComputeResponse(List.of());
+                final ComputeResponse response = new ComputeResponse(List.of());
+                return response;
             }
         });
         listenerCollectingStatus = ActionListener.releaseAfter(listenerCollectingStatus, () -> Releasables.close(drivers));
@@ -494,6 +523,15 @@ record DataNode(Transport.Connection connection, List<ShardId> shardIds, Map<Ind
+    /**
+     * Result from lookupDataNodes where can_match is performed to determine what shards can be skipped
+     * and which target nodes are needed for running the ES|QL query
+     * @param dataNodes list of DataNode to perform the ES|QL query on
+     * @param totalShards Total number of shards (from can_match phase), including skipped shards
+     * @param skippedShards Number of skipped shards (from can_match phase)
+     */
+    record DataNodeResult(List<DataNode> dataNodes, int totalShards, int skippedShards) {}
+
     record RemoteCluster(String clusterAlias, Transport.Connection connection, String[] concreteIndices, OriginalIndices originalIndices) {
     }

@@ -510,7 +548,7 @@ private void lookupDataNodes(
         QueryBuilder filter,
         Set<String> concreteIndices,
         OriginalIndices
originalIndices, - ActionListener> listener + ActionListener listener ) { ActionListener searchShardsListener = listener.map(resp -> { Map nodes = new HashMap<>(); @@ -519,17 +557,21 @@ private void lookupDataNodes( } Map> nodeToShards = new HashMap<>(); Map> nodeToAliasFilters = new HashMap<>(); + int totalShards = 0; + int skippedShards = 0; for (SearchShardsGroup group : resp.getGroups()) { var shardId = group.shardId(); - if (group.skipped()) { - continue; - } if (group.allocatedNodes().isEmpty()) { throw new ShardNotFoundException(group.shardId(), "no shard copies found {}", group.shardId()); } if (concreteIndices.contains(shardId.getIndexName()) == false) { continue; } + totalShards++; + if (group.skipped()) { + skippedShards++; + continue; + } String targetNode = group.allocatedNodes().get(0); nodeToShards.computeIfAbsent(targetNode, k -> new ArrayList<>()).add(shardId); AliasFilter aliasFilter = resp.getAliasFilters().get(shardId.getIndex().getUUID()); @@ -543,7 +585,7 @@ private void lookupDataNodes( Map aliasFilters = nodeToAliasFilters.getOrDefault(e.getKey(), Map.of()); dataNodes.add(new DataNode(transportService.getConnection(node), e.getValue(), aliasFilters)); } - return dataNodes; + return new DataNodeResult(dataNodes, totalShards, skippedShards); }); SearchShardsRequest searchShardsRequest = new SearchShardsRequest( originalIndices.indices(), @@ -736,7 +778,7 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T request.indices(), request.indicesOptions() ); - try (var computeListener = new ComputeListener(transportService, (CancellableTask) task, listener)) { + try (var computeListener = ComputeListener.create(transportService, (CancellableTask) task, listener)) { runComputeOnDataNode((CancellableTask) task, sessionId, reducePlan, request, computeListener); } } @@ -754,15 +796,26 @@ public void messageReceived(ClusterComputeRequest request, TransportChannel chan listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + plan)); return; } - try (var computeListener = new ComputeListener(transportService, (CancellableTask) task, listener)) { + String clusterAlias = request.clusterAlias(); + /* + * This handler runs only on remote cluster coordinators, so it creates a new local EsqlExecutionInfo object to record + * execution metadata for ES|QL processing local to this cluster. The execution info will be copied into the + * ComputeResponse that is sent back to the primary coordinating cluster. 
+ */ + EsqlExecutionInfo execInfo = new EsqlExecutionInfo(); + execInfo.swapCluster(clusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(clusterAlias, Arrays.toString(request.indices()))); + CancellableTask cancellable = (CancellableTask) task; + long start = request.configuration().getQueryStartTimeNanos(); + try (var computeListener = ComputeListener.create(clusterAlias, transportService, cancellable, execInfo, start, listener)) { runComputeOnRemoteCluster( - request.clusterAlias(), + clusterAlias, request.sessionId(), (CancellableTask) task, request.configuration(), (ExchangeSinkExec) plan, Set.of(remoteClusterPlan.targetIndices()), remoteClusterPlan.originalIndices(), + execInfo, computeListener ); } @@ -786,6 +839,7 @@ void runComputeOnRemoteCluster( ExchangeSinkExec plan, Set concreteIndices, OriginalIndices originalIndices, + EsqlExecutionInfo executionInfo, ComputeListener computeListener ) { final var exchangeSink = exchangeService.getSinkHandler(globalSessionId); @@ -810,7 +864,7 @@ void runComputeOnRemoteCluster( parentTask, new ComputeContext(localSessionId, clusterAlias, List.of(), configuration, exchangeSource, exchangeSink), coordinatorPlan, - computeListener.acquireCompute() + computeListener.acquireCompute(clusterAlias) ); startComputeOnDataNodes( localSessionId, @@ -821,6 +875,7 @@ void runComputeOnRemoteCluster( concreteIndices, originalIndices, exchangeSource, + executionInfo, computeListener ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 561baa76a01a9..17c795f2de28c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -25,10 +25,12 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; @@ -64,6 +66,7 @@ public class TransportEsqlQueryAction extends HandledTransportAction asyncTaskManagementService; + private final RemoteClusterService remoteClusterService; @Inject @SuppressWarnings("this-escape") @@ -114,6 +117,7 @@ public TransportEsqlQueryAction( threadPool, bigArrays ); + this.remoteClusterService = transportService.getRemoteClusterService(); } @Override @@ -159,22 +163,26 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener remoteClusterService.isSkipUnavailable(clusterAlias)); BiConsumer> runPhase = (physicalPlan, resultListener) -> computeService.execute( sessionId, (CancellableTask) task, physicalPlan, configuration, + executionInfo, resultListener ); - planExecutor.esql( request, sessionId, configuration, enrichPolicyResolver, + executionInfo, + remoteClusterService, runPhase, listener.map(result -> toResponse(task, request, configuration, result)) ); @@ -187,9 +195,18 @@ private EsqlQueryResponse 
toResponse(Task task, EsqlQueryRequest request, Config if (task instanceof EsqlQueryTask asyncTask && request.keepOnCompletion()) { String asyncExecutionId = asyncTask.getExecutionId().getEncoded(); threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, asyncExecutionId); - return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), asyncExecutionId, false, request.async()); + return new EsqlQueryResponse( + columns, + result.pages(), + profile, + request.columnar(), + asyncExecutionId, + false, + request.async(), + result.executionInfo() + ); } - return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async()); + return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async(), result.executionInfo()); } /** @@ -245,7 +262,8 @@ public EsqlQueryResponse initialResponse(EsqlQueryTask task) { false, asyncExecutionId, true, // is_running - true // isAsync + true, // isAsync + null ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java index 33a48d2e7df05..0687788ad53fd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java @@ -51,6 +51,7 @@ public class Configuration implements Writeable { private final boolean profile; private final Map> tables; + private final long queryStartTimeNanos; public Configuration( ZoneId zi, @@ -62,7 +63,8 @@ public Configuration( int resultTruncationDefaultSize, String query, boolean profile, - Map> tables + Map> tables, + long queryStartTimeNanos ) { this.zoneId = zi.normalized(); this.now = ZonedDateTime.now(Clock.tick(Clock.system(zoneId), Duration.ofNanos(1))); @@ -76,6 +78,7 @@ public Configuration( this.profile = profile; this.tables = tables; assert tables != null; + this.queryStartTimeNanos = queryStartTimeNanos; } public Configuration(BlockStreamInput in) throws IOException { @@ -98,6 +101,11 @@ public Configuration(BlockStreamInput in) throws IOException { } else { this.tables = Map.of(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + this.queryStartTimeNanos = in.readLong(); + } else { + this.queryStartTimeNanos = -1; + } } @Override @@ -119,6 +127,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_REQUEST_TABLES)) { out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, StreamOutput::writeWriteable)); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + out.writeLong(queryStartTimeNanos); + } } public ZoneId zoneId() { @@ -163,9 +174,17 @@ public String query() { * Note: Currently, it returns {@link System#currentTimeMillis()}, but this value will be serialized between nodes. */ public long absoluteStartedTimeInMillis() { + // MP TODO: I'm confused - Why is this not a fixed value taken at the start of the query processing? return System.currentTimeMillis(); } + /** + * @return Start time of the ESQL query in nanos + */ + public long getQueryStartTimeNanos() { + return queryStartTimeNanos; + } + /** * Tables specified in the request. 
*/ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 674f2c3c2ee65..608e45bb2085b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -8,15 +8,22 @@ package org.elasticsearch.xpack.esql.session; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.indices.IndicesExpressionGrouper; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; @@ -61,7 +68,9 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.BiFunction; @@ -88,6 +97,7 @@ public class EsqlSession { private final Mapper mapper; private final PhysicalPlanOptimizer physicalPlanOptimizer; private final PlanningMetrics planningMetrics; + private final IndicesExpressionGrouper indicesExpressionGrouper; public EsqlSession( String sessionId, @@ -99,7 +109,8 @@ public EsqlSession( LogicalPlanOptimizer logicalPlanOptimizer, Mapper mapper, Verifier verifier, - PlanningMetrics planningMetrics + PlanningMetrics planningMetrics, + IndicesExpressionGrouper indicesExpressionGrouper ) { this.sessionId = sessionId; this.configuration = configuration; @@ -112,6 +123,7 @@ public EsqlSession( this.logicalPlanOptimizer = logicalPlanOptimizer; this.physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); this.planningMetrics = planningMetrics; + this.indicesExpressionGrouper = indicesExpressionGrouper; } public String sessionId() { @@ -123,14 +135,16 @@ public String sessionId() { */ public void execute( EsqlQueryRequest request, + EsqlExecutionInfo executionInfo, BiConsumer> runPhase, ActionListener listener ) { LOGGER.debug("ESQL query:\n{}", request.query()); analyzedPlan( parse(request.query(), request.params()), + executionInfo, listener.delegateFailureAndWrap( - (next, analyzedPlan) -> executeOptimizedPlan(request, runPhase, optimizedPlan(analyzedPlan), next) + (next, analyzedPlan) -> executeOptimizedPlan(request, executionInfo, runPhase, optimizedPlan(analyzedPlan), next) ) ); } @@ -141,6 +155,7 @@ public void execute( */ public void executeOptimizedPlan( EsqlQueryRequest request, + EsqlExecutionInfo executionInfo, BiConsumer> runPhase, LogicalPlan optimizedPlan, ActionListener listener @@ -149,7 +164,7 @@ public void executeOptimizedPlan( if (firstPhase == null) { 
runPhase.accept(logicalPlanToPhysicalPlan(optimizedPlan, request), listener); } else { - executePhased(new ArrayList<>(), optimizedPlan, request, firstPhase, runPhase, listener); + executePhased(new ArrayList<>(), optimizedPlan, request, executionInfo, firstPhase, runPhase, listener); } } @@ -157,6 +172,7 @@ private void executePhased( List profileAccumulator, LogicalPlan mainPlan, EsqlQueryRequest request, + EsqlExecutionInfo executionInfo, LogicalPlan firstPhase, BiConsumer> runPhase, ActionListener listener @@ -171,10 +187,10 @@ private void executePhased( PhysicalPlan finalPhysicalPlan = logicalPlanToPhysicalPlan(newMainPlan, request); runPhase.accept(finalPhysicalPlan, next.delegateFailureAndWrap((finalListener, finalResult) -> { profileAccumulator.addAll(finalResult.profiles()); - finalListener.onResponse(new Result(finalResult.schema(), finalResult.pages(), profileAccumulator)); + finalListener.onResponse(new Result(finalResult.schema(), finalResult.pages(), profileAccumulator, executionInfo)); })); } else { - executePhased(profileAccumulator, newMainPlan, request, newFirstPhase, runPhase, next); + executePhased(profileAccumulator, newMainPlan, request, executionInfo, newFirstPhase, runPhase, next); } } finally { Releasables.closeExpectNoException(Releasables.wrap(Iterators.map(result.pages().iterator(), p -> p::releaseBlocks))); @@ -188,13 +204,13 @@ private LogicalPlan parse(String query, QueryParams params) { return parsed; } - public void analyzedPlan(LogicalPlan parsed, ActionListener listener) { + public void analyzedPlan(LogicalPlan parsed, EsqlExecutionInfo executionInfo, ActionListener listener) { if (parsed.analyzed()) { listener.onResponse(parsed); return; } - preAnalyze(parsed, (indices, policies) -> { + preAnalyze(parsed, executionInfo, (indices, policies) -> { planningMetrics.gatherPreAnalysisMetrics(parsed); Analyzer analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indices, policies), verifier); var plan = analyzer.analyze(parsed); @@ -204,7 +220,12 @@ public void analyzedPlan(LogicalPlan parsed, ActionListener listene }, listener); } - private void preAnalyze(LogicalPlan parsed, BiFunction action, ActionListener listener) { + private void preAnalyze( + LogicalPlan parsed, + EsqlExecutionInfo executionInfo, + BiFunction action, + ActionListener listener + ) { PreAnalyzer.PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed); var unresolvedPolicies = preAnalysis.enriches.stream() .map(e -> new EnrichPolicyResolver.UnresolvedPolicy((String) e.policyName().fold(), e.mode())) @@ -220,8 +241,10 @@ private void preAnalyze(LogicalPlan parsed, BiFunction { + preAnalyzeIndices(parsed, executionInfo, l.delegateFailureAndWrap((ll, indexResolution) -> { if (indexResolution.isValid()) { + updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( indexResolution.get().concreteIndices().toArray(String[]::new) ).keySet(); @@ -242,7 +265,51 @@ private void preAnalyze(LogicalPlan parsed, BiFunction listener, Set enrichPolicyMatchFields) { + // visible for testing + static void updateExecutionInfoWithUnavailableClusters(EsqlExecutionInfo executionInfo, Set unavailableClusters) { + for (String clusterAlias : unavailableClusters) { + executionInfo.swapCluster( + clusterAlias, + (k, v) -> new 
EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED).build() + ); + // TODO: follow-on PR will set SKIPPED status when skip_unavailable=true and throw an exception when skip_un=false + } + } + + // visible for testing + static void updateExecutionInfoWithClustersWithNoMatchingIndices(EsqlExecutionInfo executionInfo, IndexResolution indexResolution) { + Set clustersWithResolvedIndices = new HashSet<>(); + // determine missing clusters + for (String indexName : indexResolution.get().indexNameWithModes().keySet()) { + clustersWithResolvedIndices.add(RemoteClusterAware.parseClusterAlias(indexName)); + } + Set clustersRequested = executionInfo.clusterAliases(); + Set clustersWithNoMatchingIndices = Sets.difference(clustersRequested, clustersWithResolvedIndices); + /* + * These are clusters in the original request that are not present in the field-caps response. They were + * specified with an index or indices that do not exist, so the search on that cluster is done. + * Mark it as SKIPPED with 0 shards searched and took=0. + */ + for (String c : clustersWithNoMatchingIndices) { + executionInfo.swapCluster( + c, + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SKIPPED) + .setTook(new TimeValue(0)) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .build() + ); + } + } + + private void preAnalyzeIndices( + LogicalPlan parsed, + EsqlExecutionInfo executionInfo, + ActionListener listener, + Set enrichPolicyMatchFields + ) { PreAnalyzer.PreAnalysis preAnalysis = new PreAnalyzer().preAnalyze(parsed); // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one if (preAnalysis.indices.size() > 1) { @@ -252,6 +319,16 @@ private void preAnalyzeIndices(LogicalPlan parsed, ActionListener clusterIndices = indicesExpressionGrouper.groupIndices(IndicesOptions.DEFAULT, table.index()); + for (Map.Entry entry : clusterIndices.entrySet()) { + final String clusterAlias = entry.getKey(); + String indexExpr = Strings.arrayToCommaDelimitedString(entry.getValue().indices()); + executionInfo.swapCluster(clusterAlias, (k, v) -> { + assert v == null : "No cluster for " + clusterAlias + " should have been added to ExecutionInfo yet"; + return new EsqlExecutionInfo.Cluster(clusterAlias, indexExpr, executionInfo.isSkipUnavailable(clusterAlias)); + }); + } indexResolver.resolveAsMergedMapping(table.index(), fieldNames, listener); } else { try { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java index 0f26a68d3c31e..c0f94bccc50a4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java @@ -6,7 +6,9 @@ */ package org.elasticsearch.xpack.esql.session; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; @@ -18,6 +20,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.threadpool.ThreadPool; 
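Taken together, the EsqlSession changes above give the execution metadata a simple lifecycle: seed one `Cluster` entry per requested cluster alias during pre-analysis, then demote any cluster that matched no indices or was unreachable to `SKIPPED` before analysis continues. A rough, purely illustrative walkthrough (the `ExecutionInfoLifecycleSketch` class is hypothetical and not part of this PR; it sits in the same `org.elasticsearch.xpack.esql.session` package because the helpers it calls are package-private):

```java
package org.elasticsearch.xpack.esql.session;

import java.util.Map;

import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.indices.IndicesExpressionGrouper;
import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo;
import org.elasticsearch.xpack.esql.index.IndexResolution;

final class ExecutionInfoLifecycleSketch {

    // Step 1, mirroring preAnalyzeIndices(...) above: group the index expression by
    // cluster alias and seed a Cluster entry for each alias that will be searched.
    static void seedClusters(EsqlExecutionInfo executionInfo, IndicesExpressionGrouper grouper, String indexPattern) {
        Map<String, OriginalIndices> clusterIndices = grouper.groupIndices(IndicesOptions.DEFAULT, indexPattern);
        for (Map.Entry<String, OriginalIndices> entry : clusterIndices.entrySet()) {
            String alias = entry.getKey();
            String expression = Strings.arrayToCommaDelimitedString(entry.getValue().indices());
            executionInfo.swapCluster(
                alias,
                (k, v) -> new EsqlExecutionInfo.Cluster(alias, expression, executionInfo.isSkipUnavailable(alias))
            );
        }
    }

    // Step 2, mirroring preAnalyze(...) above: once field-caps resolution returns, mark
    // clusters that matched nothing or never answered as SKIPPED, in the same order.
    static void markSkipped(EsqlExecutionInfo executionInfo, IndexResolution indexResolution) {
        EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution);
        EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters());
    }
}
```

The unavailable set itself comes from `IndexResolver.determineUnavailableRemoteClusters(...)` just below, which only counts a field-caps failure against a remote when the exception is a connectivity one and the failing index expression is qualified with a cluster alias.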
+import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.esql.action.EsqlResolveFieldsAction; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.DateEsField; @@ -155,7 +158,23 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { concreteIndices.put(ir.getIndexName(), ir.getIndexMode()); } - return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices)); + Set unavailableRemoteClusters = determineUnavailableRemoteClusters(fieldCapsResponse.getFailures()); + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices), unavailableRemoteClusters); + } + + // visible for testing + static Set determineUnavailableRemoteClusters(List failures) { + Set unavailableRemotes = new HashSet<>(); + for (FieldCapabilitiesFailure failure : failures) { + if (ExceptionsHelper.isRemoteUnavailableException(failure.getException())) { + for (String indexExpression : failure.getIndices()) { + if (indexExpression.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) > 0) { + unavailableRemotes.add(RemoteClusterAware.parseClusterAlias(indexExpression)); + } + } + } + } + return unavailableRemotes; } private boolean allNested(List caps) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java index 42beb88bbe38b..4f90893c759b8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java @@ -10,6 +10,8 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -25,5 +27,6 @@ * are quite cheap to build, so we build them for all ESQL runs, regardless of if * users have asked for them. But we only include them in the results if users ask * for them. + * @param executionInfo Metadata about the execution of this query. Used for cross cluster queries. 
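+ * May be null for purely local (non-CCS) runs; several of the test callers below pass null.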
*/ -public record Result(List schema, List pages, List profiles) {} +public record Result(List schema, List pages, List profiles, @Nullable EsqlExecutionInfo executionInfo) {} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index a0719286a4009..3eef31e1cc406 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.xpack.esql.CsvTestUtils.ActualResults; import org.elasticsearch.xpack.esql.CsvTestUtils.Type; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; @@ -423,7 +424,8 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { new LogicalPlanOptimizer(new LogicalOptimizerContext(configuration)), mapper, TEST_VERIFIER, - new PlanningMetrics() + new PlanningMetrics(), + null ); TestPhysicalOperationProviders physicalOperationProviders = testOperationProviders(testDataset); @@ -431,6 +433,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { session.executeOptimizedPlan( new EsqlQueryRequest(), + new EsqlExecutionInfo(), runPhase(bigArrays, physicalOperationProviders), session.optimizedPlan(analyzed), listener.delegateFailureAndWrap( @@ -567,6 +570,6 @@ protected void start(Driver driver, ActionListener driverListener) { } }; listener = ActionListener.releaseAfter(listener, () -> Releasables.close(drivers)); - runner.runToCompletion(drivers, listener.map(ignore -> new Result(physicalPlan.output(), collectedPages, List.of()))); + runner.runToCompletion(drivers, listener.map(ignore -> new Result(physicalPlan.output(), collectedPages, List.of(), null))); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 9d4a1c21c5995..a344f8d46350d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; @@ -33,9 +34,12 @@ import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -57,10 +61,13 @@ import 
java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.concurrent.ConcurrentMap; import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.ChunkedToXContent.wrapAsToXContent; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.xpack.esql.action.EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION; @@ -123,7 +130,41 @@ EsqlQueryResponse randomResponseAsync(boolean columnar, EsqlQueryResponse.Profil id = randomAlphaOfLengthBetween(1, 16); isRunning = randomBoolean(); } - return new EsqlQueryResponse(columns, values, profile, columnar, id, isRunning, async); + return new EsqlQueryResponse(columns, values, profile, columnar, id, isRunning, async, createExecutionInfo()); + } + + EsqlExecutionInfo createExecutionInfo() { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.overallTook(new TimeValue(5000)); + executionInfo.swapCluster( + "", + (k, v) -> new EsqlExecutionInfo.Cluster( + "", + "logs-1", + false, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + 10, + 10, + 3, + 0, + new TimeValue(4444L) + ) + ); + executionInfo.swapCluster( + "remote1", + (k, v) -> new EsqlExecutionInfo.Cluster( + "remote1", + "remote1:logs-1", + true, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + 12, + 12, + 5, + 0, + new TimeValue(4999L) + ) + ); + return executionInfo; } private ColumnInfoImpl randomColumnInfo() { @@ -205,21 +246,30 @@ protected EsqlQueryResponse mutateInstance(EsqlQueryResponse instance) { List cols = new ArrayList<>(instance.columns()); // keep the type the same so the values are still valid but change the name cols.set(mutCol, new ColumnInfoImpl(cols.get(mutCol).name() + "mut", cols.get(mutCol).type())); - yield new EsqlQueryResponse(cols, deepCopyOfPages(instance), instance.profile(), instance.columnar(), instance.isAsync()); + yield new EsqlQueryResponse( + cols, + deepCopyOfPages(instance), + instance.profile(), + instance.columnar(), + instance.isAsync(), + instance.getExecutionInfo() + ); } case 1 -> new EsqlQueryResponse( instance.columns(), deepCopyOfPages(instance), instance.profile(), false == instance.columnar(), - instance.isAsync() + instance.isAsync(), + instance.getExecutionInfo() ); case 2 -> new EsqlQueryResponse( instance.columns(), deepCopyOfPages(instance), randomValueOtherThan(instance.profile(), this::randomProfile), instance.columnar(), - instance.isAsync() + instance.isAsync(), + instance.getExecutionInfo() ); case 3 -> { int noPages = instance.pages().size(); @@ -233,7 +283,8 @@ yield new EsqlQueryResponse( differentPages, instance.profile(), instance.columnar(), - instance.isAsync() + instance.isAsync(), + instance.getExecutionInfo() ); } default -> throw new IllegalArgumentException(); @@ -288,8 +339,10 @@ public static class ResponseBuilder { IS_RUNNING, ObjectParser.ValueType.BOOLEAN_OR_NULL ); + parser.declareInt(constructorArg(), new ParseField("took")); parser.declareObjectArray(constructorArg(), (p, c) -> ColumnInfoImpl.fromXContent(p), new ParseField("columns")); parser.declareField(constructorArg(), (p, c) -> p.list(), new ParseField("values"), ObjectParser.ValueType.OBJECT_ARRAY); + parser.declareObject(optionalConstructorArg(), (p, c) -> parseClusters(p), new ParseField("_clusters")); PARSER = 
parser.build(); } @@ -300,9 +353,12 @@ public static class ResponseBuilder { public ResponseBuilder( @Nullable String asyncExecutionId, Boolean isRunning, + Integer took, List columns, - List> values + List> values, + EsqlExecutionInfo executionInfo ) { + executionInfo.overallTook(new TimeValue(took)); this.response = new EsqlQueryResponse( columns, List.of(valuesToPage(TestBlockFactory.getNonBreakingInstance(), columns, values)), @@ -310,7 +366,138 @@ public ResponseBuilder( false, asyncExecutionId, isRunning != null, - isAsync(asyncExecutionId, isRunning) + isAsync(asyncExecutionId, isRunning), + executionInfo + ); + } + + static EsqlExecutionInfo parseClusters(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + int total = -1; + int successful = -1; + int skipped = -1; + int running = 0; + int partial = 0; + int failed = 0; + ConcurrentMap clusterInfoMap = ConcurrentCollections.newConcurrentMap(); + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (EsqlExecutionInfo.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + total = parser.intValue(); + } else if (EsqlExecutionInfo.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + successful = parser.intValue(); + } else if (EsqlExecutionInfo.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + skipped = parser.intValue(); + } else if (EsqlExecutionInfo.RUNNING_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + running = parser.intValue(); + } else if (EsqlExecutionInfo.PARTIAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + partial = parser.intValue(); + } else if (EsqlExecutionInfo.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + failed = parser.intValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (EsqlExecutionInfo.DETAILS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + String currentDetailsFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentDetailsFieldName = parser.currentName(); // cluster alias + if (currentDetailsFieldName.equals(EsqlExecutionInfo.LOCAL_CLUSTER_NAME_REPRESENTATION)) { + currentDetailsFieldName = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + } + } else if (token == XContentParser.Token.START_OBJECT) { + EsqlExecutionInfo.Cluster c = parseCluster(currentDetailsFieldName, parser); + clusterInfoMap.put(currentDetailsFieldName, c); + } else { + parser.skipChildren(); + } + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + if (clusterInfoMap.isEmpty()) { + return new EsqlExecutionInfo(); + } else { + return new EsqlExecutionInfo(clusterInfoMap); + } + } + + private static EsqlExecutionInfo.Cluster parseCluster(String clusterAlias, XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + + String indexExpression = null; + String status = "running"; + long took = -1L; + // these are all from the _shards section + int totalShards = -1; + int successfulShards = -1; + 
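+ // -1 doubles as an "absent" sentinel for each of these; whatever is still -1 after
+ // parsing is turned into null below, since a cluster entry may omit its _shards section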
int skippedShards = -1; + int failedShards = -1; + + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (EsqlExecutionInfo.Cluster.INDICES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + indexExpression = parser.text(); + } else if (EsqlExecutionInfo.Cluster.STATUS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + status = parser.text(); + } else if (EsqlExecutionInfo.TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + took = parser.longValue(); + } else { + parser.skipChildren(); + } + } else if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (RestActions.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + failedShards = parser.intValue(); + } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + successfulShards = parser.intValue(); + } else if (RestActions.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + totalShards = parser.intValue(); + } else if (RestActions.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + skippedShards = parser.intValue(); + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + } else { + parser.skipChildren(); + } + } + + Integer totalShardsFinal = totalShards == -1 ? null : totalShards; + Integer successfulShardsFinal = successfulShards == -1 ? null : successfulShards; + Integer skippedShardsFinal = skippedShards == -1 ? null : skippedShards; + Integer failedShardsFinal = failedShards == -1 ? null : failedShards; + TimeValue tookTimeValue = took == -1L ? 
null : new TimeValue(took); + return new EsqlExecutionInfo.Cluster( + clusterAlias, + indexExpression, + true, + EsqlExecutionInfo.Cluster.Status.valueOf(status.toUpperCase(Locale.ROOT)), + totalShardsFinal, + successfulShardsFinal, + skippedShardsFinal, + failedShardsFinal, + tookTimeValue ); } @@ -327,27 +514,29 @@ static EsqlQueryResponse fromXContent(XContentParser parser) { } public void testChunkResponseSizeColumnar() { + int sizeClusterDetails = 14; try (EsqlQueryResponse resp = randomResponse(true, null)) { int columnCount = resp.pages().get(0).getBlockCount(); int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount() * p.getBlockCount()).sum() + columnCount * 2; - assertChunkCount(resp, r -> 5 + bodySize); + assertChunkCount(resp, r -> 5 + sizeClusterDetails + bodySize); } try (EsqlQueryResponse resp = randomResponseAsync(true, null, true)) { int columnCount = resp.pages().get(0).getBlockCount(); int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount() * p.getBlockCount()).sum() + columnCount * 2; - assertChunkCount(resp, r -> 6 + bodySize); // is_running + assertChunkCount(resp, r -> 6 + sizeClusterDetails + bodySize); // is_running } } public void testChunkResponseSizeRows() { + int sizeClusterDetails = 14; try (EsqlQueryResponse resp = randomResponse(false, null)) { int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount()).sum(); - assertChunkCount(resp, r -> 5 + bodySize); + assertChunkCount(resp, r -> 5 + sizeClusterDetails + bodySize); } try (EsqlQueryResponse resp = randomResponseAsync(false, null, true)) { int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount()).sum(); - assertChunkCount(resp, r -> 6 + bodySize); + assertChunkCount(resp, r -> 6 + sizeClusterDetails + bodySize); } } @@ -398,7 +587,8 @@ public void testBasicXContentIdAndRunning() { false, "id-123", true, - true + true, + null ) ) { assertThat(Strings.toString(response), equalTo(""" @@ -415,7 +605,8 @@ public void testNullColumnsXContentDropNulls() { false, null, false, - false + false, + null ) ) { assertThat( @@ -444,7 +635,8 @@ public void testNullColumnsFromBuilderXContentDropNulls() { false, null, false, - false + false, + null ) ) { assertThat( @@ -468,7 +660,8 @@ private EsqlQueryResponse simple(boolean columnar, boolean async) { List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), null, columnar, - async + async, + null ); } @@ -491,7 +684,8 @@ public void testProfileXContent() { ) ), false, - false + false, + null ); ) { assertThat(Strings.toString(response, true, false), equalTo(""" @@ -552,7 +746,7 @@ public void testColumns() { var longBlk2 = blockFactory.newLongArrayVector(new long[] { 300L, 400L, 500L }, 3).asBlock(); var columnInfo = List.of(new ColumnInfoImpl("foo", "integer"), new ColumnInfoImpl("bar", "long")); var pages = List.of(new Page(intBlk1, longBlk1), new Page(intBlk2, longBlk2)); - try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false, null)) { assertThat(columnValues(response.column(0)), contains(10, 20, 30, 40, 50)); assertThat(columnValues(response.column(1)), contains(100L, 200L, 300L, 400L, 500L)); expectThrows(IllegalArgumentException.class, () -> response.column(-1)); @@ -564,7 +758,7 @@ public void testColumnsIllegalArg() { var intBlk1 = blockFactory.newIntArrayVector(new int[] { 10 }, 1).asBlock(); var columnInfo = List.of(new 
ColumnInfoImpl("foo", "integer")); var pages = List.of(new Page(intBlk1)); - try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false, null)) { expectThrows(IllegalArgumentException.class, () -> response.column(-1)); expectThrows(IllegalArgumentException.class, () -> response.column(1)); } @@ -583,7 +777,7 @@ public void testColumnsWithNull() { } var columnInfo = List.of(new ColumnInfoImpl("foo", "integer")); var pages = List.of(new Page(blk1), new Page(blk2), new Page(blk3)); - try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false, null)) { assertThat(columnValues(response.column(0)), contains(10, null, 30, null, null, 60, null, 80, 90, null)); expectThrows(IllegalArgumentException.class, () -> response.column(-1)); expectThrows(IllegalArgumentException.class, () -> response.column(2)); @@ -603,7 +797,7 @@ public void testColumnsWithMultiValue() { } var columnInfo = List.of(new ColumnInfoImpl("foo", "integer")); var pages = List.of(new Page(blk1), new Page(blk2), new Page(blk3)); - try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { + try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false, null)) { assertThat(columnValues(response.column(0)), contains(List.of(10, 20), null, List.of(40, 50), null, 70, 80, null)); expectThrows(IllegalArgumentException.class, () -> response.column(-1)); expectThrows(IllegalArgumentException.class, () -> response.column(2)); @@ -616,7 +810,7 @@ public void testRowValues() { List columns = randomList(numColumns, numColumns, this::randomColumnInfo); int noPages = randomIntBetween(1, 20); List pages = randomList(noPages, noPages, () -> randomPage(columns)); - try (var resp = new EsqlQueryResponse(columns, pages, null, false, "", false, false)) { + try (var resp = new EsqlQueryResponse(columns, pages, null, false, "", false, false, null)) { var rowValues = getValuesList(resp.rows()); var valValues = getValuesList(resp.values()); for (int i = 0; i < rowValues.size(); i++) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java index ef4fa6d51a888..a3a18d7a30b59 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java @@ -42,7 +42,8 @@ static Configuration randomConfiguration() { EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), StringUtils.EMPTY, randomBoolean(), - Map.of() + Map.of(), + System.nanoTime() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index d5d5a0188e262..7af1c180fd7b9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -68,7 +68,8 @@ private Configuration randomLocaleConfig() { EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), "", false, - Map.of() + Map.of(), + System.nanoTime() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index 0bc3d8d90dbd9..c8bbe03bde411 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -68,7 +68,8 @@ private Configuration randomLocaleConfig() { EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), "", false, - Map.of() + Map.of(), + System.nanoTime() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java index 658f396aa027c..fe1ac52427627 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java @@ -241,12 +241,12 @@ public void testPlainTextEmptyCursorWithColumns() { public void testPlainTextEmptyCursorWithoutColumns() { assertEquals( StringUtils.EMPTY, - getTextBodyContent(PLAIN_TEXT.format(req(), new EsqlQueryResponse(emptyList(), emptyList(), null, false, false))) + getTextBodyContent(PLAIN_TEXT.format(req(), new EsqlQueryResponse(emptyList(), emptyList(), null, false, false, null))) ); } private static EsqlQueryResponse emptyData() { - return new EsqlQueryResponse(singletonList(new ColumnInfoImpl("name", "keyword")), emptyList(), null, false, false); + return new EsqlQueryResponse(singletonList(new ColumnInfoImpl("name", "keyword")), emptyList(), null, false, false, null); } private static EsqlQueryResponse regularData() { @@ -278,7 +278,7 @@ private static EsqlQueryResponse regularData() { ) ); - return new EsqlQueryResponse(headers, values, null, false, false); + return new EsqlQueryResponse(headers, values, null, false, false, null); } private static EsqlQueryResponse escapedData() { @@ -299,7 +299,7 @@ private static EsqlQueryResponse escapedData() { ) ); - return new EsqlQueryResponse(headers, values, null, false, false); + return new EsqlQueryResponse(headers, values, null, false, false, null); } private static RestRequest req() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java index 273561c0348c6..c145d770409da 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import java.util.Arrays; @@ -80,7 +81,8 @@ public 
class TextFormatterTests extends ESTestCase { ), null, randomBoolean(), - randomBoolean() + randomBoolean(), + new EsqlExecutionInfo() ); TextFormatter formatter = new TextFormatter(esqlResponse); @@ -154,7 +156,8 @@ public void testFormatWithoutHeader() { ), null, randomBoolean(), - randomBoolean() + randomBoolean(), + new EsqlExecutionInfo() ); String[] result = getTextBodyContent(new TextFormatter(response).format(false)).split("\n"); @@ -194,7 +197,8 @@ public void testVeryLongPadding() { ), null, randomBoolean(), - randomBoolean() + randomBoolean(), + new EsqlExecutionInfo() ) ).format(false) ) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index bfad9fd3d634c..0e09809d16902 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -75,7 +75,8 @@ public class EvalMapperTests extends ESTestCase { 10000, StringUtils.EMPTY, false, - Map.of() + Map.of(), + System.nanoTime() ); @ParametersFactory(argumentFormatting = "%1$s") diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index 81c012bb95fd8..272321b0f350b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -146,7 +146,8 @@ private Configuration config() { EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(null), StringUtils.EMPTY, false, - Map.of() + Map.of(), + System.nanoTime() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java index 26529a3605d38..da11a790e6f2f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java @@ -27,7 +27,9 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.junit.After; import org.junit.Before; import org.mockito.Mockito; @@ -48,8 +50,10 @@ import static org.elasticsearch.test.tasks.MockTaskManager.SPY_TASK_MANAGER_SETTING; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -89,7 +93,7 @@ private CancellableTask newTask() { ); } - private ComputeResponse randomResponse() { + private ComputeResponse randomResponse(boolean includeExecutionInfo) { int numProfiles = randomIntBetween(0, 2); List profiles = new ArrayList<>(numProfiles); for (int i 
= 0; i < numProfiles; i++) { @@ -105,12 +109,33 @@ private ComputeResponse randomResponse() { ) ); } - return new ComputeResponse(profiles); + if (includeExecutionInfo) { + return new ComputeResponse( + profiles, + new TimeValue(randomLongBetween(0, 50000), TimeUnit.NANOSECONDS), + 10, + 10, + randomIntBetween(0, 3), + 0 + ); + } else { + return new ComputeResponse(profiles); + } } public void testEmpty() { PlainActionFuture results = new PlainActionFuture<>(); - try (ComputeListener ignored = new ComputeListener(transportService, newTask(), results)) { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + try ( + ComputeListener ignored = ComputeListener.create( + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + transportService, + newTask(), + executionInfo, + System.nanoTime(), + results + ) + ) { assertFalse(results.isDone()); } assertTrue(results.isDone()); @@ -120,7 +145,17 @@ public void testEmpty() { public void testCollectComputeResults() { PlainActionFuture future = new PlainActionFuture<>(); List allProfiles = new ArrayList<>(); - try (ComputeListener computeListener = new ComputeListener(transportService, newTask(), future)) { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + try ( + ComputeListener computeListener = ComputeListener.create( + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + transportService, + newTask(), + executionInfo, + System.nanoTime(), + future + ) + ) { int tasks = randomIntBetween(1, 100); for (int t = 0; t < tasks; t++) { if (randomBoolean()) { @@ -131,7 +166,7 @@ public void testCollectComputeResults() { threadPool.generic() ); } else { - ComputeResponse resp = randomResponse(); + ComputeResponse resp = randomResponse(false); allProfiles.addAll(resp.getProfiles()); ActionListener subListener = computeListener.acquireCompute(); threadPool.schedule( @@ -142,11 +177,188 @@ public void testCollectComputeResults() { } } } - ComputeResponse result = future.actionGet(10, TimeUnit.SECONDS); + ComputeResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertThat( + response.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) + ); + Mockito.verifyNoInteractions(transportService.getTaskManager()); + } + + /** + * Tests the acquireCompute functionality running on the querying ("local") cluster, that is waiting upon + * a ComputeResponse from a remote cluster. The acquireCompute code under test should fill in the + * {@link EsqlExecutionInfo.Cluster} with the information in the ComputeResponse from the remote cluster. 
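+ * A condensed sketch of the flow driven here (names as in the test body below):
+ * <pre>{@code
+ * try (ComputeListener cl = ComputeListener.create(LOCAL_CLUSTER_GROUP_KEY,
+ *         transportService, task, executionInfo, System.nanoTime(), future)) {
+ *     cl.acquireCompute("rc1").onResponse(remoteResponse);
+ *     // the acquireCompute listener copies took and shard counts into Cluster "rc1"
+ * }
+ * }</pre>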
+ */ + public void testAcquireComputeCCSListener() { + PlainActionFuture future = new PlainActionFuture<>(); + List allProfiles = new ArrayList<>(); + String remoteAlias = "rc1"; + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.swapCluster(remoteAlias, (k, v) -> new EsqlExecutionInfo.Cluster(remoteAlias, "logs*", false)); + try ( + ComputeListener computeListener = ComputeListener.create( + // 'whereRunning' for this test is the local cluster, waiting for a response from the remote cluster + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + transportService, + newTask(), + executionInfo, + System.nanoTime(), + future + ) + ) { + int tasks = randomIntBetween(1, 5); + for (int t = 0; t < tasks; t++) { + ComputeResponse resp = randomResponse(true); + allProfiles.addAll(resp.getProfiles()); + // Use remoteAlias here to indicate what remote cluster alias the listener is waiting to hear back from + ActionListener subListener = computeListener.acquireCompute(remoteAlias); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(resp)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } + } + ComputeResponse response = future.actionGet(10, TimeUnit.SECONDS); assertThat( - result.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + response.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) ); + + assertTrue(executionInfo.isCrossClusterSearch()); + EsqlExecutionInfo.Cluster rc1Cluster = executionInfo.getCluster(remoteAlias); + assertThat(rc1Cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(rc1Cluster.getTotalShards(), equalTo(10)); + assertThat(rc1Cluster.getSuccessfulShards(), equalTo(10)); + assertThat(rc1Cluster.getSkippedShards(), greaterThanOrEqualTo(0)); + assertThat(rc1Cluster.getSkippedShards(), lessThanOrEqualTo(3)); + assertThat(rc1Cluster.getFailedShards(), equalTo(0)); + assertThat(rc1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + + Mockito.verifyNoInteractions(transportService.getTaskManager()); + } + + /** + * Run an acquireCompute cycle on the RemoteCluster. + * AcquireCompute will fill in the took time on the EsqlExecutionInfo (the shard info is filled in before this, + * so we just hard code them in the Cluster in this test) and then a ComputeResponse will be created in the refs + * listener and returned with the shard and took time info. 
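+ * The SUCCESSFUL status is deliberately not set on this path: that happens in the CCS
+ * listener on the querying cluster, which is why the final assertion expects RUNNING.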
+ */ + public void testAcquireComputeRunningOnRemoteClusterFillsInTookTime() { + PlainActionFuture future = new PlainActionFuture<>(); + List allProfiles = new ArrayList<>(); + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + String remoteAlias = "rc1"; + executionInfo.swapCluster( + remoteAlias, + (k, v) -> new EsqlExecutionInfo.Cluster( + remoteAlias, + "logs*", + false, + EsqlExecutionInfo.Cluster.Status.RUNNING, + 10, + 10, + 3, + 0, + null // to be filled in the acquireCompute listener + ) + ); + try ( + ComputeListener computeListener = ComputeListener.create( + // whereRunning=remoteAlias simulates running on the remote cluster + remoteAlias, + transportService, + newTask(), + executionInfo, + System.nanoTime(), + future + ) + ) { + int tasks = randomIntBetween(1, 5); + for (int t = 0; t < tasks; t++) { + ComputeResponse resp = randomResponse(true); + allProfiles.addAll(resp.getProfiles()); + ActionListener subListener = computeListener.acquireCompute(remoteAlias); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(resp)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } + } + ComputeResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertThat( + response.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) + ); + assertThat(response.getTotalShards(), equalTo(10)); + assertThat(response.getSuccessfulShards(), equalTo(10)); + assertThat(response.getSkippedShards(), equalTo(3)); + assertThat(response.getFailedShards(), equalTo(0)); + // check that the took time was filled in on the ExecutionInfo for the remote cluster and put into the ComputeResponse to be + // sent back to the querying cluster + assertThat(response.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(executionInfo.getCluster(remoteAlias).getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(executionInfo.getCluster(remoteAlias).getTook(), equalTo(response.getTook())); + + // the status in the (remote) executionInfo will still be RUNNING, since the SUCCESSFUL status gets set on the querying + // cluster executionInfo in the acquireCompute CCS listener, NOT present in this test - see testAcquireComputeCCSListener + assertThat(executionInfo.getCluster(remoteAlias).getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING)); + + Mockito.verifyNoInteractions(transportService.getTaskManager()); + } + + /** + * Run an acquireCompute cycle on the querying cluster of a CCS. The local cluster's took time is filled in + * on the EsqlExecutionInfo once all tasks finish, and the refs callback then marks it SUCCESSFUL, but no + * took time is added to the outgoing ComputeResponse - that only happens when running on a remote cluster.
+ */ + public void testAcquireComputeRunningOnQueryingClusterFillsInTookTime() { + PlainActionFuture future = new PlainActionFuture<>(); + List allProfiles = new ArrayList<>(); + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + String localCluster = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + // we need a remote cluster in the ExecutionInfo in order to simulate a CCS, since ExecutionInfo is only + // fully filled in for cross-cluster searches + executionInfo.swapCluster(localCluster, (k, v) -> new EsqlExecutionInfo.Cluster(localCluster, "logs*", false)); + executionInfo.swapCluster("my_remote", (k, v) -> new EsqlExecutionInfo.Cluster("my_remote", "my_remote:logs*", false)); + try ( + ComputeListener computeListener = ComputeListener.create( + // whereRunning=localCluster simulates running on the querying cluster + localCluster, + transportService, + newTask(), + executionInfo, + System.nanoTime(), + future + ) + ) { + int tasks = randomIntBetween(1, 5); + for (int t = 0; t < tasks; t++) { + ComputeResponse resp = randomResponse(true); + allProfiles.addAll(resp.getProfiles()); + ActionListener subListener = computeListener.acquireCompute(localCluster); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(resp)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } + } + ComputeResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertThat( + response.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) + ); + // check that the took time was filled in on the ExecutionInfo for the remote cluster and put into the ComputeResponse to be + // sent back to the querying cluster + assertNull("took time is not added to the ComputeResponse on the querying cluster", response.getTook()); + assertThat(executionInfo.getCluster(localCluster).getTook().millis(), greaterThanOrEqualTo(0L)); + // once all the took times have been gathered from the tasks, the refs callback will set execution status to SUCCESSFUL + assertThat(executionInfo.getCluster(localCluster).getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); + Mockito.verifyNoInteractions(transportService.getTaskManager()); } @@ -160,11 +372,21 @@ public void testCancelOnFailure() throws Exception { int failedTasks = between(1, 100); PlainActionFuture rootListener = new PlainActionFuture<>(); CancellableTask rootTask = newTask(); - try (ComputeListener computeListener = new ComputeListener(transportService, rootTask, rootListener)) { + EsqlExecutionInfo execInfo = new EsqlExecutionInfo(); + try ( + ComputeListener computeListener = ComputeListener.create( + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + transportService, + rootTask, + execInfo, + System.nanoTime(), + rootListener + ) + ) { for (int i = 0; i < successTasks; i++) { ActionListener subListener = computeListener.acquireCompute(); threadPool.schedule( - ActionRunnable.wrap(subListener, l -> l.onResponse(randomResponse())), + ActionRunnable.wrap(subListener, l -> l.onResponse(randomResponse(false))), TimeValue.timeValueNanos(between(0, 100)), threadPool.generic() ); @@ -214,10 +436,14 @@ public void onFailure(Exception e) { } }; CountDownLatch latch = new CountDownLatch(1); + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); try ( - ComputeListener computeListener = new ComputeListener( + ComputeListener computeListener = ComputeListener.create( + 
RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, transportService, newTask(), + executionInfo, + System.nanoTime(), ActionListener.runAfter(rootListener, latch::countDown) ) ) { @@ -231,7 +457,7 @@ public void onFailure(Exception e) { threadPool.generic() ); } else { - ComputeResponse resp = randomResponse(); + ComputeResponse resp = randomResponse(false); allProfiles.addAll(resp.getProfiles()); int numWarnings = randomIntBetween(1, 5); Map warnings = new HashMap<>(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java index 8f0a4227e60ef..1f35bb5312b20 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java @@ -102,7 +102,8 @@ protected Configuration mutateInstance(Configuration in) { resultTruncationDefaultSize, query, profile, - tables + tables, + System.nanoTime() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java new file mode 100644 index 0000000000000..8dcad2f354b26 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionTests.java @@ -0,0 +1,264 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.type.EsFieldTests; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; + +public class EsqlSessionTests extends ESTestCase { + + public void testUpdateExecutionInfoWithUnavailableClusters() { + // skip_unavailable=true clusters are unavailable, both marked as SKIPPED + { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", true)); + + EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Set.of(remote1Alias, remote2Alias)); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); + assertNull(executionInfo.overallTook()); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); + 
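+ // only the two remotes were reported unavailable, so the local cluster's entry is
+ // untouched: still RUNNING with null took and shard counts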
assertThat(localCluster.getIndexExpression(), equalTo("logs*")); + assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); + assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); + assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); + assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); + assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); + } + + // skip_unavailable=false cluster is unavailable, marked as SKIPPED // TODO: in follow on PR this will change to throwing an + // Exception + { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + + EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Set.of(remote2Alias)); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); + assertNull(executionInfo.overallTook()); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); + assertThat(localCluster.getIndexExpression(), equalTo("logs*")); + assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); + assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); + assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); + assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); + assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.SKIPPED); + } + + // all clusters available, no Clusters in ExecutionInfo should be modified + { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + + EsqlSession.updateExecutionInfoWithUnavailableClusters(executionInfo, Set.of()); + + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(localClusterAlias, remote1Alias, remote2Alias))); + assertNull(executionInfo.overallTook()); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); + assertThat(localCluster.getIndexExpression(), equalTo("logs*")); + 
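+ // with an empty unavailable set nothing is modified, so RUNNING with null counts
+ // here simply means "left as seeded"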
assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); + assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); + assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); + assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); + assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + } + } + + public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { + // all clusters present in EsIndex, so no updates to EsqlExecutionInfo should happen + { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + + EsIndex esIndex = new EsIndex( + "logs*,remote1:*,remote2:mylogs1,remote2:mylogs2,remote2:logs*", + randomMapping(), + Map.of( + "logs-a", + IndexMode.STANDARD, + "remote1:logs-a", + IndexMode.STANDARD, + "remote2:mylogs1", + IndexMode.STANDARD, + "remote2:mylogs2", + IndexMode.STANDARD, + "remote2:logs-b", + IndexMode.STANDARD + ) + ); + IndexResolution indexResolution = IndexResolution.valid(esIndex, Set.of()); + + EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); + assertThat(localCluster.getIndexExpression(), equalTo("logs*")); + assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); + assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); + assertClusterStatusAndHasNullCounts(remote1Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); + assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); + assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + } + + // remote1 is missing from EsIndex info, so it should be updated and marked as SKIPPED with 0 total shards, 0 took time, etc. 
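+ // (the zeroed values mirror exactly what updateExecutionInfoWithClustersWithNoMatchingIndices
+ // writes through the Cluster.Builder: took=0 and 0 for every shard counter)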
+ { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + + EsIndex esIndex = new EsIndex( + "logs*,remote2:mylogs1,remote2:mylogs2,remote2:logs*", + randomMapping(), + Map.of( + "logs-a", + IndexMode.STANDARD, + "remote2:mylogs1", + IndexMode.STANDARD, + "remote2:mylogs2", + IndexMode.STANDARD, + "remote2:logs-b", + IndexMode.STANDARD + ) + ); + IndexResolution indexResolution = IndexResolution.valid(esIndex, Set.of()); + + EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); + assertThat(localCluster.getIndexExpression(), equalTo("logs*")); + assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); + assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); + assertThat(remote1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote1Cluster.getTook().millis(), equalTo(0L)); + assertThat(remote1Cluster.getTotalShards(), equalTo(0)); + assertThat(remote1Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote1Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote1Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); + assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); + assertClusterStatusAndHasNullCounts(remote2Cluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + } + + // all remotes are missing from EsIndex info, so they should be updated and marked as SKIPPED with 0 total shards, 0 took time, etc. 
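+ // (remote1 is additionally flagged unavailable in the IndexResolution below, but only the
+ // no-matching-indices path runs here; both remotes still end up SKIPPED with zeroed counts)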
+ { + final String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + final String remote1Alias = "remote1"; + final String remote2Alias = "remote2"; + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + + EsIndex esIndex = new EsIndex( + "logs*,remote2:mylogs1,remote2:mylogs2,remote2:logs*", + randomMapping(), + Map.of("logs-a", IndexMode.STANDARD) + ); + IndexResolution indexResolution = IndexResolution.valid(esIndex, Set.of(remote1Alias)); + + EsqlSession.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + + EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(localClusterAlias); + assertThat(localCluster.getIndexExpression(), equalTo("logs*")); + assertClusterStatusAndHasNullCounts(localCluster, EsqlExecutionInfo.Cluster.Status.RUNNING); + + EsqlExecutionInfo.Cluster remote1Cluster = executionInfo.getCluster(remote1Alias); + assertThat(remote1Cluster.getIndexExpression(), equalTo("*")); + assertThat(remote1Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote1Cluster.getTook().millis(), equalTo(0L)); + assertThat(remote1Cluster.getTotalShards(), equalTo(0)); + assertThat(remote1Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote1Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote1Cluster.getFailedShards(), equalTo(0)); + + EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(remote2Alias); + assertThat(remote2Cluster.getIndexExpression(), equalTo("mylogs1,mylogs2,logs*")); + assertThat(remote2Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); + assertThat(remote2Cluster.getTook().millis(), equalTo(0L)); + assertThat(remote2Cluster.getTotalShards(), equalTo(0)); + assertThat(remote2Cluster.getSuccessfulShards(), equalTo(0)); + assertThat(remote2Cluster.getSkippedShards(), equalTo(0)); + assertThat(remote2Cluster.getFailedShards(), equalTo(0)); + } + } + + private void assertClusterStatusAndHasNullCounts(EsqlExecutionInfo.Cluster cluster, EsqlExecutionInfo.Cluster.Status status) { + assertThat(cluster.getStatus(), equalTo(status)); + assertNull(cluster.getTook()); + assertNull(cluster.getTotalShards()); + assertNull(cluster.getSuccessfulShards()); + assertNull(cluster.getSkippedShards()); + assertNull(cluster.getFailedShards()); + } + + private static Map randomMapping() { + int size = between(0, 10); + Map result = new HashMap<>(size); + while (result.size() < size) { + result.put(randomAlphaOfLength(5), EsFieldTests.randomAnyEsField(1)); + } + return result; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java new file mode 100644 index 0000000000000..51497b5ca5093 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.session;
+
+import org.apache.lucene.index.CorruptIndexException;
+import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.NoSeedNodeLeftException;
+import org.elasticsearch.transport.NoSuchRemoteClusterException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class IndexResolverTests extends ESTestCase {
+
+ public void testDetermineUnavailableRemoteClusters() {
+ // two clusters, both "remote unavailable" type exceptions
+ {
+ List<FieldCapabilitiesFailure> failures = new ArrayList<>();
+ failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSuchRemoteClusterException("remote2")));
+ failures.add(
+ new FieldCapabilitiesFailure(
+ new String[] { "remote1:foo", "remote1:bar" },
+ new IllegalStateException("Unable to open any connections")
+ )
+ );
+
+ Set<String> unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures);
+ assertThat(unavailableClusters, equalTo(Set.of("remote1", "remote2")));
+ }
+
+ // one cluster with two "remote unavailable" type failures
+ {
+ List<FieldCapabilitiesFailure> failures = new ArrayList<>();
+ failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSuchRemoteClusterException("remote2")));
+ failures.add(new FieldCapabilitiesFailure(new String[] { "remote2:mylogs1" }, new NoSeedNodeLeftException("no seed node")));
+
+ Set<String> unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures);
+ assertThat(unavailableClusters, equalTo(Set.of("remote2")));
+ }
+
+ // two clusters, one with a "remote unavailable" type exception and one with another exception type
+ {
+ List<FieldCapabilitiesFailure> failures = new ArrayList<>();
+ failures.add(new FieldCapabilitiesFailure(new String[] { "remote1:mylogs1" }, new CorruptIndexException("foo", "bar")));
+ failures.add(
+ new FieldCapabilitiesFailure(
+ new String[] { "remote2:foo", "remote2:bar" },
+ new IllegalStateException("Unable to open any connections")
+ )
+ );
+ Set<String> unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures);
+ assertThat(unavailableClusters, equalTo(Set.of("remote2")));
+ }
+
+ // one cluster with an exception not known to indicate "remote unavailable"
+ {
+ List<FieldCapabilitiesFailure> failures = new ArrayList<>();
+ failures.add(new FieldCapabilitiesFailure(new String[] { "remote1:mylogs1" }, new RuntimeException("foo")));
+ Set<String> unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures);
+ assertThat(unavailableClusters, equalTo(Set.of()));
+ }
+
+ // empty failures list
+ {
+ List<FieldCapabilitiesFailure> failures = new ArrayList<>();
+ Set<String> unavailableClusters = IndexResolver.determineUnavailableRemoteClusters(failures);
+ assertThat(unavailableClusters, equalTo(Set.of()));
+ }
+ }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java
index cef04727bb8ed..adc449bfc092e 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java
@@ -8,18 +8,22 @@ package org.elasticsearch.xpack.esql.stats;
import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.fieldcaps.IndexFieldCapabilities;
+import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.index.IndexMode;
+import org.elasticsearch.indices.IndicesExpressionGrouper;
import org.elasticsearch.telemetry.metric.MeterRegistry;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.esql.EsqlTestUtils;
import org.elasticsearch.xpack.esql.VerificationException;
+import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo;
import org.elasticsearch.xpack.esql.action.EsqlQueryRequest;
import org.elasticsearch.xpack.esql.action.EsqlResolveFieldsAction;
import org.elasticsearch.xpack.esql.analysis.EnrichResolution;
@@ -106,17 +110,31 @@ public void testFailedMetric() {
// test a failed query: xyz field doesn't exist
request.query("from test | stats m = max(xyz)");
BiConsumer<PhysicalPlan, ActionListener<Result>> runPhase = (p, r) -> fail("this shouldn't happen");
- planExecutor.esql(request, randomAlphaOfLength(10), EsqlTestUtils.TEST_CFG, enrichResolver, runPhase, new ActionListener<>() {
- @Override
- public void onResponse(Result result) {
- fail("this shouldn't happen");
+ IndicesExpressionGrouper groupIndicesByCluster = (indicesOptions, indexExpressions) -> Map.of(
+ "",
+ new OriginalIndices(new String[] { "test" }, IndicesOptions.DEFAULT)
+ );
+
+ planExecutor.esql(
+ request,
+ randomAlphaOfLength(10),
+ EsqlTestUtils.TEST_CFG,
+ enrichResolver,
+ new EsqlExecutionInfo(),
+ groupIndicesByCluster,
+ runPhase,
+ new ActionListener<>() {
+ @Override
+ public void onResponse(Result result) {
+ fail("this shouldn't happen");
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ assertThat(e, instanceOf(VerificationException.class));
+ }
}
-
- @Override
- public void onFailure(Exception e) {
- assertThat(e, instanceOf(VerificationException.class));
- }
- });
+ );
// check we recorded the failure and that the query actually came
assertEquals(1, planExecutor.metrics().stats().get("queries._all.failed"));
@@ -126,15 +144,24 @@ public void onFailure(Exception e) {
// fix the failing query: foo field does exist
request.query("from test | stats m = max(foo)");
runPhase = (p, r) -> r.onResponse(null);
- planExecutor.esql(request, randomAlphaOfLength(10), EsqlTestUtils.TEST_CFG, enrichResolver, runPhase, new ActionListener<>() {
- @Override
- public void onResponse(Result result) {}
-
- @Override
- public void onFailure(Exception e) {
- fail("this shouldn't happen");
+ planExecutor.esql(
+ request,
+ randomAlphaOfLength(10),
+ EsqlTestUtils.TEST_CFG,
+ enrichResolver,
+ new EsqlExecutionInfo(),
+ groupIndicesByCluster,
+ runPhase,
+ new ActionListener<>() {
+ @Override
+ public void onResponse(Result result) {}
+
+ @Override
+ public void onFailure(Exception e) {
+ fail("this shouldn't happen");
+ }
}
- });
+ );
// check the new metrics
assertEquals(1, planExecutor.metrics().stats().get("queries._all.failed"));
diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
index d76490e885592..807986b08c4d3 100644
--- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
+++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
@@ -26,6 +26,7 @@
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.search.RestSearchAction;
+import org.elasticsearch.test.MapMatcher;
import org.elasticsearch.test.StreamsUtils;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestLegacyFeatures;
@@ -1062,9 +1063,14 @@ public void testDisableFieldNameField() throws IOException {
}""");
// {"columns":[{"name":"dv","type":"keyword"},{"name":"no_dv","type":"keyword"}],"values":[["test",null]]}
try {
+ Map<String, Object> result = entityAsMap(client().performRequest(esql));
+ MapMatcher mapMatcher = matchesMap();
+ if (result.get("took") != null) {
+ mapMatcher = mapMatcher.entry("took", ((Integer) result.get("took")).intValue());
+ }
assertMap(
- entityAsMap(client().performRequest(esql)),
- matchesMap().entry(
+ result,
+ mapMatcher.entry(
"columns",
List.of(Map.of("name", "dv", "type", "keyword"), Map.of("name", "no_dv", "type", "keyword"))
).entry("values", List.of(List.of("test", "test")))