Skip to content

Commit

Permalink
ESQL: Support "capabilites" in the csv-spec tests (#108464)
Browse files Browse the repository at this point in the history
This flips the csv-spec construct called `required_feature:` from using
"cluster features" to using "cluster capabilities". "Features" are a
"heavy" concept that live in the cluster state and should be used for
quickly checking things on the local node. "Capabilities" are a fairly
fluid list of strings that live on each node and are calculated on the
fly, so they are much nicer for testing.

This adds all existing "cluster features" for esql as "cluster
capabilities" for the ESQL `_query` and `_query/async` actions. The
tests just check that.

In a follow-up change I'll replace the syntax `required_feature:` with
`required_capability:`.

Our esql capabilities all start with `esql.` - but capabilities are
naturally scoped to the endpoint. So I've removed the `esql.` from the
capabilities we add.
  • Loading branch information
nik9000 committed May 10, 2024
1 parent cb43573 commit 7cc4335
Show file tree
Hide file tree
Showing 7 changed files with 113 additions and 19 deletions.
Expand Up @@ -67,7 +67,7 @@ public class MultiClusterSpecIT extends EsqlSpecTestCase {
public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster);

private static TestFeatureService remoteFeaturesService;
private static RestClient remoteFeaturesServiceClient;
private static RestClient remoteClusterClient;

@ParametersFactory(argumentFormatting = "%2$s.%3$s")
public static List<Object[]> readScriptSpec() throws Exception {
Expand Down Expand Up @@ -95,30 +95,34 @@ public MultiClusterSpecIT(String fileName, String groupName, String testName, In
/**
 * Decide whether this csv-spec test should run against the multi-cluster setup.
 * Skips when a required capability/feature is missing on the remote cluster,
 * when the query uses _index metadata, or when the old-version cluster has the
 * test disabled.
 */
@Override
protected void shouldSkipTest(String testName) throws IOException {
// Base-class skip checks run first.
super.shouldSkipTest(testName);
// NOTE(review): this hunk appears to show BOTH sides of a diff. The
// requiredFeatures loop below was replaced by the checkCapabilities(...)
// call in this commit — only one of the two should survive a merge.
for (String feature : testCase.requiredFeatures) {
assumeTrue("Test " + testName + " requires " + feature, remoteFeaturesService().clusterHasFeature(feature));
}
// New path: ask the remote cluster for its capabilities, falling back to
// cluster features on older nodes (see checkCapabilities in the base class).
checkCapabilities(remoteClusterClient(), remoteFeaturesService(), testName, testCase);
assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query));
assumeTrue("Test " + testName + " is skipped on " + Clusters.oldVersion(), isEnabled(testName, Clusters.oldVersion()));
}

/**
 * Lazily build (and cache in a static field) a {@link TestFeatureService}
 * describing the remote cluster, so feature checks don't rebuild it per test.
 */
private TestFeatureService remoteFeaturesService() throws IOException {
if (remoteFeaturesService == null) {
// NOTE(review): the lines below mix both sides of a diff — the
// remoteFeaturesServiceClient construction and the first
// readVersionsFromNodesInfo/getClusterStateFeatures calls were replaced
// by remoteClusterClient() in this commit. As rendered here,
// remoteNodeVersions is declared twice and this will not compile;
// keep only the remoteClusterClient() variants.
HttpHost[] remoteHosts = parseClusterHosts(remoteCluster.getHttpAddresses()).toArray(HttpHost[]::new);
remoteFeaturesServiceClient = super.buildClient(restAdminSettings(), remoteHosts);
var remoteNodeVersions = readVersionsFromNodesInfo(remoteFeaturesServiceClient);
var remoteNodeVersions = readVersionsFromNodesInfo(remoteClusterClient());
// Keep only node versions that parse as legacy semantic versions.
var semanticNodeVersions = remoteNodeVersions.stream()
.map(ESRestTestCase::parseLegacyVersion)
.flatMap(Optional::stream)
.collect(Collectors.toSet());
remoteFeaturesService = createTestFeatureService(getClusterStateFeatures(remoteFeaturesServiceClient), semanticNodeVersions);
remoteFeaturesService = createTestFeatureService(getClusterStateFeatures(remoteClusterClient()), semanticNodeVersions);
}
return remoteFeaturesService;
}

/**
 * Lazily build an admin {@link RestClient} pointed at the remote cluster and
 * cache it in a static field; closed in {@code closeRemoveFeaturesService}.
 */
private RestClient remoteClusterClient() throws IOException {
    if (remoteClusterClient != null) {
        return remoteClusterClient;
    }
    var hosts = parseClusterHosts(remoteCluster.getHttpAddresses()).toArray(HttpHost[]::new);
    remoteClusterClient = super.buildClient(restAdminSettings(), hosts);
    return remoteClusterClient;
}

/**
 * Release the cached remote-cluster REST client once the whole class is done.
 */
// NOTE(review): method name looks like a typo for "closeRemoteFeaturesService";
// left as-is since the @AfterClass hook may be referenced by name elsewhere.
@AfterClass
public static void closeRemoveFeaturesService() throws IOException {
// NOTE(review): this hunk shows both sides of a diff — the close of
// remoteFeaturesServiceClient was replaced by remoteClusterClient in this
// commit; only the surviving field's close call should remain.
IOUtils.close(remoteFeaturesServiceClient);
IOUtils.close(remoteClusterClient);
}

@Override
Expand Down
Expand Up @@ -15,6 +15,7 @@
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.geometry.Geometry;
import org.elasticsearch.geometry.Point;
Expand All @@ -23,6 +24,7 @@
import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.TestFeatureService;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xpack.esql.CsvTestUtils;
import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.RequestObjectBuilder;
Expand Down Expand Up @@ -150,12 +152,41 @@ public final void test() throws Throwable {
}

/**
 * Skip this csv-spec test unless the cluster supports everything it needs:
 * all required capabilities (or, on old nodes, the equivalent cluster
 * features) and the test being enabled for the current version.
 */
protected void shouldSkipTest(String testName) throws IOException {
// NOTE(review): this hunk appears to contain both sides of a diff — the
// requiredFeatures loop was replaced by the checkCapabilities(...) call in
// this commit; only one of the two should survive a merge.
for (String feature : testCase.requiredFeatures) {
assumeTrue("Test " + testName + " requires " + feature, clusterHasFeature(feature));
}
checkCapabilities(adminClient(), testFeatureService, testName, testCase);
assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT));
}

/**
 * Skip the test unless the cluster reports every capability the csv-spec
 * case requires. Prefers the capabilities API; when that API is missing or
 * reports false (mixed-version clusters, very old nodes) it falls back to
 * checking the equivalent {@code esql.}-prefixed cluster features.
 *
 * @param client             client used to query the capabilities API
 * @param testFeatureService feature lookup used for the fallback path
 * @param testName           name used in the skip message
 * @param testCase           the parsed csv-spec case with its requirements
 * @throws IOException if the capabilities request fails unexpectedly
 */
protected static void checkCapabilities(RestClient client, TestFeatureService testFeatureService, String testName, CsvTestCase testCase)
    throws IOException {
    List<String> required = testCase.requiredCapabilities;
    if (required.isEmpty()) {
        return;
    }
    try {
        var supported = clusterHasCapability(client, "POST", "/_query", List.of(), required);
        if (supported.orElse(false)) {
            // Everything the test needs is advertised — run it.
            return;
        }
        LOGGER.info("capabilities API returned false, we might be in a mixed version cluster so falling back to cluster features");
    } catch (ResponseException e) {
        int statusFamily = e.getResponse().getStatusLine().getStatusCode() / 100;
        if (statusFamily != 4) {
            throw e;
        }
        /*
         * The node we're testing against is too old for the capabilities
         * API which means it has to be pretty old. Very old capabilities
         * are ALSO present in the features API, so we can check them instead.
         *
         * It's kind of weird that we check for *any* 400, but that's required
         * because old versions of Elasticsearch return 400, not the expected
         * 404.
         */
        LOGGER.info("capabilities API failed, falling back to cluster features");
    }
    // Fallback: capabilities map 1:1 onto "esql."-prefixed cluster features.
    for (String capability : required) {
        assumeTrue("Test " + testName + " requires " + capability, testFeatureService.clusterHasFeature("esql." + capability));
    }
}

protected final void doTest() throws Throwable {
RequestObjectBuilder builder = new RequestObjectBuilder(randomFrom(XContentType.values()));

Expand Down
@@ -0,0 +1,48 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

package org.elasticsearch.xpack.esql.action;

import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction;
import org.elasticsearch.xpack.esql.plugin.EsqlFeatures;

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/**
 * A {@link Set} of "capabilities" supported by the {@link RestEsqlQueryAction}
 * and {@link RestEsqlAsyncQueryAction} APIs. These are exposed over the
 * {@link RestNodesCapabilitiesAction} and we use them to enable tests.
 */
public class EsqlCapabilities {
    static final Set<String> CAPABILITIES = capabilities();

    /**
     * Build the capability set from all current and historical ESQL cluster
     * features, with the shared {@code esql.} prefix stripped.
     */
    private static Set<String> capabilities() {
        /*
         * Add all of our cluster features without the leading "esql."
         */
        // Build the features holder once instead of twice (it was previously
        // instantiated separately for current and historical features).
        EsqlFeatures features = new EsqlFeatures();
        List<String> caps = new ArrayList<>();
        for (NodeFeature feature : features.getFeatures()) {
            caps.add(cap(feature));
        }
        for (NodeFeature feature : features.getHistoricalFeatures().keySet()) {
            caps.add(cap(feature));
        }
        return Set.copyOf(caps);
    }

    /**
     * Convert a {@link NodeFeature} from {@link EsqlFeatures} into a
     * capability.
     *
     * @param feature feature whose id must start with {@code esql.}
     * @return the feature id with the leading {@code esql.} removed
     */
    public static String cap(NodeFeature feature) {
        assert feature.id().startsWith("esql.");
        return feature.id().substring("esql.".length());
    }
}
Expand Up @@ -39,6 +39,11 @@ public List<Route> routes() {
return List.of(new Route(POST, "/_query/async"));
}

/**
 * Capabilities this endpoint advertises over the nodes-capabilities API;
 * backed by the shared {@link EsqlCapabilities#CAPABILITIES} set so tests
 * can check support before running.
 */
@Override
public Set<String> supportedCapabilities() {
return EsqlCapabilities.CAPABILITIES;
}

@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
EsqlQueryRequest esqlRequest;
Expand Down
Expand Up @@ -39,6 +39,11 @@ public List<Route> routes() {
return List.of(new Route(POST, "/_query"));
}

/**
 * Capabilities this endpoint advertises over the nodes-capabilities API;
 * backed by the shared {@link EsqlCapabilities#CAPABILITIES} set so tests
 * can check support before running.
 */
@Override
public Set<String> supportedCapabilities() {
return EsqlCapabilities.CAPABILITIES;
}

@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
EsqlQueryRequest esqlRequest;
Expand Down
Expand Up @@ -107,6 +107,7 @@
import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.CSV_DATASET_MAP;
import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER;
import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping;
import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.cap;
import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser;
import static org.elasticsearch.xpack.ql.TestUtils.classpathResources;
import static org.hamcrest.Matchers.equalTo;
Expand Down Expand Up @@ -222,8 +223,8 @@ public final void test() throws Throwable {
* The csv tests support all but a few features. The unsupported features
* are tested in integration tests.
*/
assumeFalse("metadata fields aren't supported", testCase.requiredFeatures.contains(EsqlFeatures.METADATA_FIELDS.id()));
assumeFalse("enrich can't load fields in csv tests", testCase.requiredFeatures.contains(EsqlFeatures.ENRICH_LOAD.id()));
assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS)));
assumeFalse("enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.ENRICH_LOAD)));
doTest();
} catch (Throwable th) {
throw reworkException(th);
Expand Down
Expand Up @@ -31,7 +31,7 @@ public static class CsvSpecParser implements SpecReader.Parser {
private final StringBuilder earlySchema = new StringBuilder();
private final StringBuilder query = new StringBuilder();
private final StringBuilder data = new StringBuilder();
private final List<String> requiredFeatures = new ArrayList<>();
private final List<String> requiredCapabilities = new ArrayList<>();
private CsvTestCase testCase;

private CsvSpecParser() {}
Expand All @@ -44,16 +44,16 @@ public Object parse(String line) {
assertThat("Early schema already declared " + earlySchema, earlySchema.length(), is(0));
earlySchema.append(line.substring(SCHEMA_PREFIX.length()).trim());
} else if (line.toLowerCase(Locale.ROOT).startsWith("required_feature:")) {
requiredFeatures.add(line.substring("required_feature:".length()).trim());
requiredCapabilities.add(line.substring("required_feature:".length()).trim().replace("esql.", ""));
} else {
if (line.endsWith(";")) {
// pick up the query
testCase = new CsvTestCase();
query.append(line.substring(0, line.length() - 1).trim());
testCase.query = query.toString();
testCase.earlySchema = earlySchema.toString();
testCase.requiredFeatures = List.copyOf(requiredFeatures);
requiredFeatures.clear();
testCase.requiredCapabilities = List.copyOf(requiredCapabilities);
requiredCapabilities.clear();
earlySchema.setLength(0);
query.setLength(0);
}
Expand Down Expand Up @@ -111,7 +111,7 @@ public static class CsvTestCase {
private final List<String> expectedWarningsRegexString = new ArrayList<>();
private final List<Pattern> expectedWarningsRegex = new ArrayList<>();
public boolean ignoreOrder;
public List<String> requiredFeatures = List.of();
public List<String> requiredCapabilities = List.of();

// The emulated-specific warnings must always trail the non-emulated ones, if these are present. Otherwise, the closing bracket
// would need to be changed to a less common sequence (like `]#` maybe).
Expand Down

0 comments on commit 7cc4335

Please sign in to comment.