From be4d809b39346bfb2acd14721977314d26161031 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Fri, 29 Jul 2022 11:27:11 +0100 Subject: [PATCH 001/265] Add generateStubReleaseNotes task (#88933) When we feature freeze Elasticsearch, we need to create stub documentation for the next version. This turns out to be as simple as running the usual `generateReleaseNotes` task without any inputs. --- .../internal/release/ReleaseToolsPlugin.java | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index fb6ddc5e1be16..c93320dc2b498 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -12,6 +12,7 @@ import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTaskPlugin; import org.elasticsearch.gradle.internal.precommit.ValidateYamlAgainstSchemaTask; +import org.gradle.api.Action; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.file.Directory; @@ -22,6 +23,7 @@ import org.gradle.api.tasks.util.PatternSet; import java.io.File; +import java.util.function.Function; import javax.inject.Inject; @@ -67,10 +69,14 @@ public void apply(Project project) { task.dependsOn(validateChangelogsAgainstYamlTask); }); - project.getTasks().register("generateReleaseNotes", GenerateReleaseNotesTask.class).configure(task -> { + final Function> configureGenerateTask = shouldConfigureYamlFiles -> task -> { task.setGroup("Documentation"); - task.setDescription("Generates release notes from changelog files held in this checkout"); - task.setChangelogs(yamlFiles); + if (shouldConfigureYamlFiles) { + task.setChangelogs(yamlFiles); + task.setDescription("Generates release notes from changelog files held in this checkout"); + } else { + task.setDescription("Generates stub release notes e.g. after feature freeze"); + } task.setReleaseNotesIndexTemplate(projectDirectory.file(RESOURCES + "templates/release-notes-index.asciidoc")); task.setReleaseNotesIndexFile(projectDirectory.file("docs/reference/release-notes.asciidoc")); @@ -100,7 +106,12 @@ public void apply(Project project) { task.setMigrationIndexFile(projectDirectory.file("docs/reference/migration/index.asciidoc")); task.dependsOn(validateChangelogsTask); - }); + }; + + project.getTasks().register("generateReleaseNotes", GenerateReleaseNotesTask.class).configure(configureGenerateTask.apply(true)); + project.getTasks() + .register("generateStubReleaseNotes", GenerateReleaseNotesTask.class) + .configure(configureGenerateTask.apply(false)); project.getTasks().register("pruneChangelogs", PruneChangelogsTask.class).configure(task -> { task.setGroup("Documentation"); From bd624ba2dc53cbaf1bdc3b6240b568fa263a8603 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 29 Jul 2022 12:42:53 +0200 Subject: [PATCH 002/265] Speed up operations on BlobStoreIndexShardSnapshots (#88912) This fixes a couple of slow points in `BlobStoreIndexShardSnapshots`, which become performance critical when working with large repositories. 1. Fix `physicalFiles` containing the same `FileInfo` instances repeatedly for every snapshot that holds the file. 
Without this fix the map can hold lists as long as the number of snapshots for the shard for files common to all snapshots of the shard. Also, only lazy build the map since it's only used during snapshotting and internalize the logic into `BlobStoreIndexShardSnapshots` so we don't have to bother with wrapping as unmodifiable. 2. Add efficient copy constructors for all 3 operations on the shard to avoid expensive looping over all snapshots and their files in many cases. 3. Use list instead of redundant map in deserialization, we weren't using the map for any deduplication anyways and are safe here thanks to Jackson's duplicate name detection --- .../BlobStoreIndexShardSnapshots.java | 132 +++++++++++------- .../blobstore/BlobStoreRepository.java | 35 ++--- 2 files changed, 91 insertions(+), 76 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index f5f95b25a684d..113d3c8f28a19 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -10,20 +10,25 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; +import org.elasticsearch.index.store.StoreFileMetadata; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.Iterator; import java.util.List; import java.util.Map; - -import static java.util.Collections.unmodifiableMap; +import java.util.Set; +import java.util.stream.Collectors; /** * Contains information about all snapshots for the given shard in repository @@ -33,54 +38,53 @@ */ public class BlobStoreIndexShardSnapshots implements Iterable, ToXContentFragment { - public static final BlobStoreIndexShardSnapshots EMPTY = new BlobStoreIndexShardSnapshots(Collections.emptyList()); + public static final BlobStoreIndexShardSnapshots EMPTY = new BlobStoreIndexShardSnapshots(Map.of(), List.of()); private final List shardSnapshots; private final Map files; - private final Map> physicalFiles; - public BlobStoreIndexShardSnapshots(List shardSnapshots) { + private BlobStoreIndexShardSnapshots(Map files, List shardSnapshots) { this.shardSnapshots = List.copyOf(shardSnapshots); - // Map between blob names and file info + this.files = files; + } + + public BlobStoreIndexShardSnapshots withRetainedSnapshots(Set retainedSnapshots) { + if (retainedSnapshots.isEmpty()) { + return EMPTY; + } + final var survivingSnapshotNames = retainedSnapshots.stream().map(SnapshotId::getName).collect(Collectors.toSet()); + final ArrayList updatedSnapshots = new ArrayList<>(survivingSnapshotNames.size()); Map newFiles = new HashMap<>(); - // Map between original physical names and file info - Map> physicalFiles = new HashMap<>(); for (SnapshotFiles snapshot : shardSnapshots) { - // First we build map between filenames in the repo and their original file info - // this 
map will be used in the next loop + if (survivingSnapshotNames.contains(snapshot.snapshot()) == false) { + continue; + } + updatedSnapshots.add(snapshot); for (FileInfo fileInfo : snapshot.indexFiles()) { FileInfo oldFile = newFiles.put(fileInfo.name(), fileInfo); assert oldFile == null || oldFile.isSame(fileInfo); } - // We are doing it in two loops here so we keep only one copy of the fileInfo per blob - // the first loop de-duplicates fileInfo objects that were loaded from different snapshots but refer to - // the same blob - for (FileInfo fileInfo : snapshot.indexFiles()) { - physicalFiles.computeIfAbsent(fileInfo.physicalName(), k -> new ArrayList<>()).add(newFiles.get(fileInfo.name())); - } } - Map> mapBuilder = new HashMap<>(); - for (Map.Entry> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), List.copyOf(entry.getValue())); - } - this.physicalFiles = unmodifiableMap(mapBuilder); - this.files = unmodifiableMap(newFiles); + return new BlobStoreIndexShardSnapshots(newFiles, updatedSnapshots); } - private BlobStoreIndexShardSnapshots(Map files, List shardSnapshots) { - this.shardSnapshots = shardSnapshots; - this.files = files; - Map> physicalFiles = new HashMap<>(); - for (SnapshotFiles snapshot : shardSnapshots) { - for (FileInfo fileInfo : snapshot.indexFiles()) { - physicalFiles.computeIfAbsent(fileInfo.physicalName(), k -> new ArrayList<>()).add(files.get(fileInfo.name())); + public BlobStoreIndexShardSnapshots withAddedSnapshot(SnapshotFiles snapshotFiles) { + Map updatedFiles = null; + for (FileInfo fileInfo : snapshotFiles.indexFiles()) { + final FileInfo known = files.get(fileInfo.name()); + if (known == null) { + if (updatedFiles == null) { + updatedFiles = new HashMap<>(files); + } + updatedFiles.put(fileInfo.name(), fileInfo); + } else { + assert fileInfo.isSame(known); } } - Map> mapBuilder = new HashMap<>(); - for (Map.Entry> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), List.copyOf(entry.getValue())); - } - this.physicalFiles = unmodifiableMap(mapBuilder); + return new BlobStoreIndexShardSnapshots( + updatedFiles == null ? 
files : updatedFiles, + CollectionUtils.appendToCopyNoNullElements(shardSnapshots, snapshotFiles) + ); } /** @@ -102,7 +106,10 @@ public BlobStoreIndexShardSnapshots withClone(String source, String target) { if (sourceFiles == null) { throw new IllegalArgumentException("unknown source [" + source + "]"); } - return new BlobStoreIndexShardSnapshots(CollectionUtils.appendToCopy(shardSnapshots, sourceFiles.withSnapshotName(target))); + return new BlobStoreIndexShardSnapshots( + files, + CollectionUtils.appendToCopyNoNullElements(shardSnapshots, sourceFiles.withSnapshotName(target)) + ); } /** @@ -114,14 +121,40 @@ public List snapshots() { return this.shardSnapshots; } + // index of Lucene file name to collection of file info in the repository + // lazy computed because building this is map is rather expensive and only needed for the snapshot create operation + private Map> physicalFiles; + /** - * Finds reference to a snapshotted file by its original name + * Finds reference to a snapshotted file by its {@link StoreFileMetadata} * - * @param physicalName original name - * @return a list of file infos that match specified physical file or null if the file is not present in any of snapshots + * @param storeFileMetadata store file metadata to find file info for + * @return the file info that matches the specified physical file or null if the file is not present in any of snapshots */ - public List findPhysicalIndexFiles(String physicalName) { - return physicalFiles.get(physicalName); + public FileInfo findPhysicalIndexFile(StoreFileMetadata storeFileMetadata) { + var p = this.physicalFiles; + if (p == null) { + p = new HashMap<>(); + for (SnapshotFiles snapshot : shardSnapshots) { + for (FileInfo fileInfo : snapshot.indexFiles()) { + // we use identity hash set since we lookup all instances from the same map and thus equality == instance equality + // and we don't want to add the same file to the map multiple times + p.computeIfAbsent(fileInfo.physicalName(), k -> Collections.newSetFromMap(new IdentityHashMap<>())) + .add(files.get(fileInfo.name())); + } + } + physicalFiles = p; + } + final var found = p.get(storeFileMetadata.name()); + if (found == null) { + return null; + } + for (FileInfo fileInfo : found) { + if (fileInfo.isSame(storeFileMetadata)) { + return fileInfo; + } + } + return null; } /** @@ -228,7 +261,8 @@ public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) t if (token == null) { // New parser token = parser.nextToken(); } - Map> snapshotsMap = new HashMap<>(); + // list of tuples of snapshot name and file ids in the snapshot + List>> snapshotsAndFiles = new ArrayList<>(); Map historyUUIDs = new HashMap<>(); Map files = new HashMap<>(); XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); @@ -256,7 +290,9 @@ public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) t token = parser.nextToken(); if (Fields.FILES.equals(currentFieldName)) { if (token == XContentParser.Token.START_ARRAY) { - snapshotsMap.put(snapshot, XContentParserUtils.parseList(parser, XContentParser::text)); + snapshotsAndFiles.add( + Tuple.tuple(snapshot, XContentParserUtils.parseList(parser, XContentParser::text)) + ); } } else if (Fields.SHARD_STATE_ID.equals(currentFieldName)) { historyUUIDs.put(snapshot, parser.text()); @@ -268,19 +304,17 @@ public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) t } } - List snapshots = new ArrayList<>(snapshotsMap.size()); - for (Map.Entry> entry : 
snapshotsMap.entrySet()) { + List snapshots = new ArrayList<>(snapshotsAndFiles.size()); + for (Tuple> entry : snapshotsAndFiles) { List fileInfosBuilder = new ArrayList<>(); - for (String file : entry.getValue()) { + for (String file : entry.v2()) { FileInfo fileInfo = files.get(file); assert fileInfo != null; fileInfosBuilder.add(fileInfo); } - snapshots.add( - new SnapshotFiles(entry.getKey(), Collections.unmodifiableList(fileInfosBuilder), historyUUIDs.get(entry.getKey())) - ); + snapshots.add(new SnapshotFiles(entry.v1(), Collections.unmodifiableList(fileInfosBuilder), historyUUIDs.get(entry.v1()))); } - return new BlobStoreIndexShardSnapshots(files, Collections.unmodifiableList(snapshots)); + return new BlobStoreIndexShardSnapshots(files, snapshots); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 3b6f61aad09ee..69c01a51b337b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -2696,18 +2696,7 @@ public void snapshotShard(SnapshotShardContext context) { logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); final StoreFileMetadata md = metadataFromStore.get(fileName); - BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null; - List filesInfo = snapshots.findPhysicalIndexFiles(fileName); - if (filesInfo != null) { - for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - if (fileInfo.isSame(md)) { - // a commit point file with the same name, size and checksum was already copied to repository - // we will reuse it for this snapshot - existingFileInfo = fileInfo; - break; - } - } - } + BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = snapshots.findPhysicalIndexFile(md); // We can skip writing blobs where the metadata hash is equal to the blob's contents because we store the hash/contents // directly in the shard level metadata in this case @@ -2733,6 +2722,8 @@ public void snapshotShard(SnapshotShardContext context) { filesInShardMetadataSize += md.length(); } } else { + // a commit point file with the same name, size and checksum was already copied to repository + // we will reuse it for this snapshot indexCommitPointFiles.add(existingFileInfo); } } @@ -2756,12 +2747,9 @@ public void snapshotShard(SnapshotShardContext context) { final boolean writeShardGens = SnapshotsService.useShardGenerations(context.getRepositoryMetaVersion()); final boolean writeFileInfoWriterUUID = SnapshotsService.includeFileInfoWriterUUID(context.getRepositoryMetaVersion()); // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones - List newSnapshotsList = new ArrayList<>(); - newSnapshotsList.add(new SnapshotFiles(snapshotId.getName(), indexCommitPointFiles, context.stateIdentifier())); - for (SnapshotFiles point : snapshots) { - newSnapshotsList.add(point); - } - final BlobStoreIndexShardSnapshots updatedBlobStoreIndexShardSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); + final BlobStoreIndexShardSnapshots updatedBlobStoreIndexShardSnapshots = snapshots.withAddedSnapshot( + new SnapshotFiles(snapshotId.getName(), indexCommitPointFiles, context.stateIdentifier()) + ); final Runnable afterWriteSnapBlob; if (writeShardGens) { // When using shard generations we can safely write the index-${uuid} blob before writing out any of the 
actual data @@ -3253,19 +3241,12 @@ private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( long indexGeneration ) { // Build a list of snapshots that should be preserved - List newSnapshotsList = new ArrayList<>(); - final Set survivingSnapshotNames = survivingSnapshots.stream().map(SnapshotId::getName).collect(Collectors.toSet()); - for (SnapshotFiles point : snapshots) { - if (survivingSnapshotNames.contains(point.snapshot())) { - newSnapshotsList.add(point); - } - } + final BlobStoreIndexShardSnapshots updatedSnapshots = snapshots.withRetainedSnapshots(survivingSnapshots); ShardGeneration writtenGeneration = null; try { - if (newSnapshotsList.isEmpty()) { + if (updatedSnapshots.snapshots().isEmpty()) { return new ShardSnapshotMetaDeleteResult(indexId, snapshotShardId, ShardGenerations.DELETED_SHARD_GEN, blobs); } else { - final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); if (indexGeneration < 0L) { writtenGeneration = ShardGeneration.newGeneration(); INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedSnapshots, shardContainer, writtenGeneration.toBlobNamePart(), compress); From 6b8dab7807fa56a228a39b77c6761488e5c1a8e7 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 29 Jul 2022 07:28:25 -0400 Subject: [PATCH 003/265] [ML] fix BERT and MPNet tokenization bug when handling unicode accents (#88907) When handling unicode accents, it may have been that BERT tokenizations removed the incorrect characters. This would result in an exceptionally strange result and possibly an error. closes #88900 --- docs/changelog/88907.yaml | 6 ++++++ .../deployment/DeploymentManager.java | 2 +- .../xpack/ml/inference/nlp/Vocabulary.java | 2 ++ .../nlp/tokenizers/BasicTokenFilter.java | 21 ++++++++++++------- .../nlp/tokenizers/BasicTokenFilterTests.java | 1 + 5 files changed, 24 insertions(+), 8 deletions(-) create mode 100644 docs/changelog/88907.yaml diff --git a/docs/changelog/88907.yaml b/docs/changelog/88907.yaml new file mode 100644 index 0000000000000..2d9cab22424ca --- /dev/null +++ b/docs/changelog/88907.yaml @@ -0,0 +1,6 @@ +pr: 88907 +summary: Fix BERT and MPNet tokenization bug when handling unicode accents +area: Machine Learning +type: bug +issues: + - 88900 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 0d917debe3d02..6b984628f3b7b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -206,7 +206,7 @@ Vocabulary parseVocabularyDocLeniently(SearchHit hit) throws IOException { stream ) ) { - return Vocabulary.createParser(true).apply(parser, null); + return Vocabulary.PARSER.apply(parser, null); } catch (IOException e) { logger.error(() -> "failed to parse trained model vocabulary [" + hit.getId() + "]", e); throw e; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java index 7665c61b76ce5..6deb9a8b6d0fb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java @@ -45,6 +45,8 @@ public static ConstructingObjectParser 
createParser(boolean ig return parser; } + public static ConstructingObjectParser PARSER = createParser(true); + private final List vocab; private final List merges; private final String modelId; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java index 8828efa4af1eb..3be4eded99894 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java @@ -140,25 +140,30 @@ public boolean incrementToken() throws IOException { return false; } - void stripAccent() { + private void stripAccent() { accentBuffer.setLength(0); + boolean changed = false; if (normalizer.quickCheck(termAtt) != Normalizer.YES) { normalizer.normalize(termAtt, accentBuffer); + changed = true; + } else { + accentBuffer.append(termAtt); } List badIndices = new ArrayList<>(); List charCount = new ArrayList<>(); int index = 0; + int deletedIndices = 0; for (PrimitiveIterator.OfInt it = accentBuffer.codePoints().iterator(); it.hasNext();) { int cp = it.next(); if (Character.getType(cp) == Character.NON_SPACING_MARK) { - badIndices.add(index); + // When we iterate to delete accents, we need to account for previously deleted ones + badIndices.add(index - deletedIndices); charCount.add(Character.charCount(cp)); + deletedIndices++; + changed = true; } index++; } - if (badIndices.isEmpty()) { - return; - } for (int i = 0; i < badIndices.size(); i++) { int badIndex = badIndices.get(i); int count = charCount.get(i); @@ -166,12 +171,14 @@ void stripAccent() { accentBuffer.deleteCharAt(badIndex); } } - termAtt.setEmpty().append(accentBuffer); + if (changed) { + termAtt.setEmpty().append(accentBuffer); + } } private LinkedList split() { LinkedList splits = new LinkedList<>(); - int startOffset = offsetAtt.startOffset(); + final int startOffset = offsetAtt.startOffset(); int charIndex = 0; int lastCharSplit = 0; for (PrimitiveIterator.OfInt it = termAtt.codePoints().iterator(); it.hasNext();) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java index 9199e2c776f2e..a3288baf65968 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java @@ -67,6 +67,7 @@ public void testSplitCJK() throws Exception { public void testStripAccents() throws Exception { Analyzer analyzer = basicAnalyzerFromSettings(true, true, List.of("[UNK]")); assertAnalyzesToNoCharFilter(analyzer, "HäLLo how are you", new String[] { "HaLLo", "how", "are", "you" }); + assertAnalyzesToNoCharFilter(analyzer, "ÎÎÎÏνÎÎÎαοÏ", new String[] { "IIIII½IIII±I", "¿", "I" }); } private static void assertAnalyzesToNoCharFilter(Analyzer a, String input, String[] output) throws IOException { From 9f2b96d82e7a67aa6dddabf0fd2ae310749c39fd Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 29 Jul 2022 11:14:51 -0400 Subject: [PATCH 004/265] [ML] add sentence-piece unigram tokenizer (#88858) Add internal unigram tokenizer. 
This is the same tokenizer that XLM-Roberta and many other cross-lingual models and tasks use. This does not fully integrate it (adding configuration, integrating into NLP tasks, etc.) but instead adds just the internal tokenization and some tests showing how it runs with a precompiled charsmap. --- .../nlp/tokenizers/DelimitedToken.java | 22 + .../PrecompiledCharMapNormalizer.java | 95 +++- .../nlp/tokenizers/UnigramTokenizer.java | 493 ++++++++++++++++++ .../PrecompiledCharMapNormalizerTests.java | 55 +- .../nlp/tokenizers/UnigramTokenizerTests.java | 165 ++++++ 5 files changed, 790 insertions(+), 40 deletions(-) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizerTests.java diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DelimitedToken.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DelimitedToken.java index ec84b1794fa84..32713997f3e8d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DelimitedToken.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DelimitedToken.java @@ -7,10 +7,13 @@ package org.elasticsearch.xpack.ml.inference.nlp.tokenizers; +import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; +import static org.elasticsearch.core.Strings.format; + public class DelimitedToken { static DelimitedToken mergeTokens(List tokens) { @@ -67,6 +70,25 @@ public String toString() { } public static class Encoded extends DelimitedToken { + static DelimitedToken.Encoded mergeEncodedTokens(List tokens) { + if (tokens.size() == 1) { + return tokens.get(0); + } + int startOffSet = tokens.get(0).startOffset(); + int endOffset = tokens.get(tokens.size() - 1).endOffset(); + final int encoding = tokens.get(0).encoding; + List sequences = new ArrayList<>(tokens.size()); + for (var t : tokens) { + if (t.encoding != encoding) { + throw new IllegalArgumentException( + format("all merged tokens must have the same encoding, expected [%s]; found [%s]", encoding, t.encoding) + ); + } + sequences.add(t.charSequence()); + } + return new DelimitedToken.Encoded(new MultiCharSequence(sequences), tokens.get(0).encoding, startOffSet, endOffset); + } + private final int encoding; public Encoded(CharSequence charSequence, int encoding, int startOffset, int endOffset) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java index 4470a8629bf65..f20e836fcae87 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java @@ -13,14 +13,22 @@ import com.ibm.icu.text.BreakIterator; +import org.apache.lucene.analysis.charfilter.BaseCharFilter; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.CharsRef; +import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.UnicodeUtil; +import java.io.CharArrayReader; +import
java.io.IOException; +import java.io.Reader; import java.nio.ByteBuffer; import java.nio.CharBuffer; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Base64; +import java.util.List; import java.util.Locale; import java.util.Optional; import java.util.OptionalInt; @@ -39,10 +47,15 @@ * DARTS * * - SP normalizer + * + * We implement this as a char filter to take advantage of the underlying offset correction and because normalization needs to occur before + * tokenization (just like a charfilter) */ -public class PrecompiledCharMapNormalizer { +public class PrecompiledCharMapNormalizer extends BaseCharFilter { + + record Config(int[] offsets, String utf8str) {} - static PrecompiledCharMapNormalizer fromBase64Str(String s) { + static Config fromBase64Str(String s) { int offset = 0; byte[] bytes = Base64.getDecoder().decode(s); int trieSize = ByteBuffer.wrap(bytes, offset, 4).order(java.nio.ByteOrder.LITTLE_ENDIAN).getInt(); @@ -54,7 +67,7 @@ static PrecompiledCharMapNormalizer fromBase64Str(String s) { offset += 4; } String utf8Str = new String(bytes, offset, bytes.length - offset, StandardCharsets.UTF_8); - return new PrecompiledCharMapNormalizer(offsets, utf8Str); + return new Config(offsets, utf8Str); } // The offsets for each normalization piece. Used in DARTS algorithm to iterate and find appropriate section @@ -64,8 +77,12 @@ static PrecompiledCharMapNormalizer fromBase64Str(String s) { private final byte[] normalizedStrUtf8Bytes; // Continually reused to copy a single char into utf8 bytes private final byte[] reusableCharByteBuffer = new byte[4]; + // reusable char buffer for decoding utf8 bytes to determine char offset corrections + private final char[] reusableCharDecodeBuffer = new char[8]; + private Reader transformedInput; - public PrecompiledCharMapNormalizer(int[] offsets, String normalizedStr) { + public PrecompiledCharMapNormalizer(int[] offsets, String normalizedStr, Reader in) { + super(in); this.offsets = offsets; this.normalizedStrUtf8Bytes = normalizedStr.getBytes(StandardCharsets.UTF_8); } @@ -152,11 +169,7 @@ private Optional normalizePart(byte[] strBytes, int offset, int len) { return Optional.of(new BytesRef(normalizedStrUtf8Bytes, firstIndex, secondIndex - firstIndex)); } - String normalize(String str) { - return normalize((CharSequence) str).utf8ToString(); - } - - BytesRef normalize(CharSequence str) { + Reader normalize(CharSequence str) { // We need to iterate actual Unicode graphemes (this includes surrogate pairs, etc.) ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(str)); byte[] strBytes = new byte[byteBuffer.limit()]; @@ -167,9 +180,10 @@ BytesRef normalize(CharSequence str) { // We iterate the whole string, so b.first() is always `0` int startIter = b.first(); int codePointPos = 0; - BytesRefBuilder strBuilder = new BytesRefBuilder(); + CharsRefBuilder strBuilder = new CharsRefBuilder(); strBuilder.grow(strBytes.length); int bytePos = 0; + int normalizedCharPos = 0; // Keep in mind, these break points aren't necessarily surrogate pairs, but also codepoints that contain a combining mark for (int end = b.next(); end != BreakIterator.DONE; startIter = end, end = b.next()) { int byteLen = 0; @@ -181,9 +195,15 @@ BytesRef normalize(CharSequence str) { // The trie only go up to a depth of 5 bytes. // So even looking at it for graphemes (with combining, surrogate, etc.) that are 6+ bytes in length is useless. 
if (byteLen < 6) { - Optional subStr = normalizePart(strBytes, bytePos, byteLen); - if (subStr.isPresent()) { - strBuilder.append(subStr.get()); + Optional maybeSubStr = normalizePart(strBytes, bytePos, byteLen); + if (maybeSubStr.isPresent()) { + BytesRef subStr = maybeSubStr.get(); + int numChars = UnicodeUtil.UTF8toUTF16(subStr.bytes, subStr.offset, subStr.length, reusableCharDecodeBuffer); + normalizedCharPos += numChars; + if (numChars != end - startIter) { + addOffCorrectMap(normalizedCharPos, getLastCumulativeDiff() + end - startIter - numChars); + } + strBuilder.append(reusableCharDecodeBuffer, 0, numChars); bytePos += byteLen; continue; } @@ -191,18 +211,53 @@ BytesRef normalize(CharSequence str) { int charByteIndex = 0; for (int i = startIter; i < end; i++) { int utf8CharBytes = numUtf8Bytes(str.charAt(i)); - Optional subStr = normalizePart(strBytes, charByteIndex + bytePos, utf8CharBytes); - if (subStr.isPresent()) { - strBuilder.append(subStr.get()); + Optional maybeSubStr = normalizePart(strBytes, charByteIndex + bytePos, utf8CharBytes); + if (maybeSubStr.isPresent()) { + BytesRef subStr = maybeSubStr.get(); + int numChars = UnicodeUtil.UTF8toUTF16(subStr.bytes, subStr.offset, subStr.length, reusableCharDecodeBuffer); + normalizedCharPos += numChars; + // Meaning we removed this char + if (numChars < 1) { + addOffCorrectMap(normalizedCharPos, getLastCumulativeDiff() + 1); + } else if (numChars > 1) { + addOffCorrectMap(normalizedCharPos, getLastCumulativeDiff() - 1); + } + strBuilder.append(reusableCharDecodeBuffer, 0, numChars); } else { - int numBytes = UnicodeUtil.UTF16toUTF8(str, i, 1, reusableCharByteBuffer); - strBuilder.append(reusableCharByteBuffer, 0, numBytes); + normalizedCharPos += 1; + strBuilder.append(str.charAt(i)); } charByteIndex += utf8CharBytes; } bytePos += byteLen; } - return strBuilder.get(); + return new CharArrayReader(strBuilder.chars(), 0, strBuilder.length()); + } + + @Override + public int read(char[] cbuf, int off, int len) throws IOException { + if (transformedInput == null) { + fill(); + } + + return transformedInput.read(cbuf, off, len); } + @Override + public int read() throws IOException { + if (transformedInput == null) { + fill(); + } + + return transformedInput.read(); + } + + private void fill() throws IOException { + List charArrays = new ArrayList<>(); + char[] temp = new char[1024]; + for (int cnt = input.read(temp); cnt > 0; cnt = input.read(temp)) { + charArrays.add(new CharsRef(Arrays.copyOfRange(temp, 0, cnt), 0, cnt)); + } + transformedInput = normalize(new MultiCharSequence(charArrays)); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java new file mode 100644 index 0000000000000..26f7f49d98565 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java @@ -0,0 +1,493 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.nlp.tokenizers; + +import org.apache.lucene.analysis.CharArraySet; +import org.apache.lucene.analysis.CharacterUtils; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizerUtils.numUtf8Bytes; +import static org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizerUtils.splitOutNeverSplit; + +/** + * Sentence-piece unigram tokenizer. + * + * Does whitespace tokenization with unigram tokenization on the resulting tokens. + * + * This cannot be a token-filter as it needs access to the offset correction logic provided by the upstream CharFilter. + * + * You may notice that the offsets are always matching the individual tokens position back to the original string. This is because + * there aren't "sub-word" tokens, per-se. So, we don't have tokens that share the same offsets as in WordPiece. + */ +public final class UnigramTokenizer extends Tokenizer { + private static final double K_UNK_PENALTY = 10.0; + static final String PREFIX = "▁"; + + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); + + static UnigramTokenizer build(List neverSplit, List dictionary, List scores, String unknownToken) { + if (dictionary.isEmpty()) { + throw new IllegalArgumentException("vocab empty"); + } + if (unknownToken == null) { + throw new IllegalArgumentException("unknown token ID"); + } + CharArraySet neverSplitSet = new CharArraySet(neverSplit, false); + CharTrie neverSplitTree = CharTrie.build(neverSplit); + if (dictionary.size() != scores.size()) { + throw new IllegalArgumentException( + format("provided vocabulary [%s] and scores [%s] must have the same size", dictionary.size(), scores.size()) + ); + } + int vocabSize = dictionary.size(); + BytesTrie vocabTrie = new BytesTrie(); + Map tokenToId = Maps.newHashMapWithExpectedSize(vocabSize); + int vocabIndex = 0; + double minScore = Double.POSITIVE_INFINITY; + double[] vocabScores = new double[vocabSize]; + for (String word : dictionary) { + minScore = Double.min(minScore, scores.get(vocabIndex)); + BytesRef vocab = new BytesRef(word); + vocabScores[vocabIndex] = scores.get(vocabIndex); + tokenToId.put(vocab, vocabIndex++); + vocabTrie.insert(vocab); + } + return new UnigramTokenizer( + minScore, + vocabScores, + neverSplitTree, + neverSplitSet, + tokenToId, + vocabTrie, + Optional.ofNullable(tokenToId.get(new BytesRef(unknownToken))) + .orElseThrow( + () -> new IllegalArgumentException("provided vocabulary does not contain the unknown token of [" + unknownToken + "]") + ) + ); + } + + private final LinkedList tokens; + private final List tokenizedValues; + private final SimpleWhitespaceTokenizer whitespaceTokenizer; + + private final double minScore; + 
// This may be configurable in the future + private final boolean fuseUnk = true; + private final double[] vocabScores; + private final CharTrie neverSplit; + private final CharArraySet neverSplitHash; + private final Map vocabToId; + private final BytesTrie vocabTrie; + private final int unknownTokenId; + // This is a buffer that is reused per token for decoding the normalized char-sequence into utf-8 bytes + // It's usage is NOT thread safe + private byte[] normalizedByteBuffer = new byte[128]; + + public UnigramTokenizer( + double minScore, + double[] vocabScores, + CharTrie neverSplit, + CharArraySet neverSplitHash, + Map vocabToId, + BytesTrie vocabTrie, + int unknownTokenId + ) { + super(); + this.tokens = new LinkedList<>(); + this.tokenizedValues = new ArrayList<>(); + this.minScore = minScore; + this.neverSplit = neverSplit; + this.neverSplitHash = neverSplitHash; + this.vocabToId = vocabToId; + this.vocabTrie = vocabTrie; + this.unknownTokenId = unknownTokenId; + this.vocabScores = vocabScores; + this.whitespaceTokenizer = new SimpleWhitespaceTokenizer(); + } + + @Override + public void reset() throws IOException { + super.reset(); + tokens.clear(); + tokenizedValues.clear(); + whitespaceTokenizer.reset(); + } + + @Override + public void end() throws IOException { + super.end(); + offsetAtt.setOffset(correctOffset(whitespaceTokenizer.finalOffset), correctOffset(whitespaceTokenizer.finalOffset)); + } + + @Override + public boolean incrementToken() throws IOException { + clearAttributes(); + if (tokens.isEmpty() == false) { + DelimitedToken.Encoded token = tokens.removeFirst(); + termAtt.setEmpty().append(token.charSequence()); + offsetAtt.setOffset(token.startOffset(), token.endOffset()); + return true; + } + // First, whitespace tokenize + DelimitedToken whitespaceToken = whitespaceTokenizer.next(); + if (whitespaceToken != null) { + if (neverSplitHash.contains(whitespaceToken.charSequence())) { + Integer maybeTokenized = vocabToId.get(new BytesRef(whitespaceToken.charSequence())); + tokenizedValues.add( + new DelimitedToken.Encoded( + whitespaceToken.charSequence().toString(), + Objects.requireNonNullElse(maybeTokenized, unknownTokenId), + correctOffset(whitespaceToken.startOffset()), + correctOffset(whitespaceToken.endOffset()) + ) + ); + offsetAtt.setOffset(correctOffset(whitespaceToken.startOffset()), correctOffset(whitespaceToken.endOffset())); + return true; + } + int inputOffsetStart = whitespaceToken.startOffset(); + // Split out our neverSplit tokens + LinkedList largeTokensWithNeverSplits = splitOutNeverSplit( + whitespaceToken.charSequence(), + neverSplit, + neverSplitHash + ); + // Encode each token, skipping our "never split" ones. + for (DelimitedToken token : largeTokensWithNeverSplits) { + if (neverSplitHash.contains(token.charSequence())) { + Integer tokenId = vocabToId.get(new BytesRef(token.charSequence())); + DelimitedToken.Encoded toAdd = tokenId == null + ? new DelimitedToken.Encoded( + token.charSequence().toString(), + unknownTokenId, + correctOffset(token.startOffset() + inputOffsetStart), + correctOffset(token.endOffset() + inputOffsetStart) + ) + : new DelimitedToken.Encoded( + token.charSequence().toString(), + tokenId, + correctOffset(token.startOffset() + inputOffsetStart), + correctOffset(token.endOffset() + inputOffsetStart) + ); + tokens.add(toAdd); + continue; + } + // We always prefix the initial sub-tokens + // e.g. 
" asdf-asdf " -> ['', '▁as', 'd', 'f', '', '▁-', 'as', 'd', 'f'] + IntToIntFunction offsetCorrectorFunction = i -> { + int adj = i + inputOffsetStart + token.startOffset(); + // if the passed offset to set is `0`, that means the tokenization probably matched on the meta-space character + // Meaning, the start and end offsets for that token will be the same and ultimately discarded when re-constituting + // tokenized results (if that is necessary for the task). + if (i > 0) { + // We always apply the prefix, so account for that when correcting the offsets, basically, the original + // normalization + // doesn't know about our prefix, so we should find out the correct offsets when not taking it into account. + adj -= PREFIX.length(); + } + return correctOffset(adj); + }; + List tokenList = tokenize( + MultiCharSequence.from(PREFIX, token.charSequence()), + offsetCorrectorFunction + ); + tokenizedValues.addAll(tokenList); + tokens.addAll(tokenList); + } + DelimitedToken.Encoded token = tokens.removeFirst(); + termAtt.setEmpty().append(token.charSequence()); + offsetAtt.setOffset(token.startOffset(), token.endOffset()); + return true; + } + return false; + } + + /** + * This algorithm does the following: + * + * - iterates all the prefixes for the given input sequence, byte by byte. + * - Keeps track of the best scores for the prefixes we find and reconstitutes the tokens from those prefixes + * + * This is derived from: + * https://github.com/google/sentencepiece/blob/901368e0752b57a408ac5c84bca0a219d62c648f/src/unigram_model.cc#L890 + * https://github.com/huggingface/tokenizers/blob/1f1f86dd320fa653924eb1560e51d1b287ab0613/tokenizers/src/models/unigram/model.rs#L229 + * + * @param inputSequence The sequence to encode, should have NO whitespace characters + * @param offsetCorrection Offset corrections to apply to the tokens. Should take into account any previous char-filtering and tokens. + * @return The list of delimited and encoded tokens + */ + List tokenize(CharSequence inputSequence, IntToIntFunction offsetCorrection) { + int bytelen = UnicodeUtil.calcUTF16toUTF8Length(inputSequence, 0, inputSequence.length()); + if (bytelen > normalizedByteBuffer.length) { + normalizedByteBuffer = new byte[bytelen + 1]; + } + int numBytes = UnicodeUtil.UTF16toUTF8(inputSequence, 0, inputSequence.length(), normalizedByteBuffer); + double unkScore = minScore - K_UNK_PENALTY; + BestPathNode[] bestPathNodes = new BestPathNode[numBytes + 1]; + int bytePos = 0; + int charPos = 0; + while (bytePos < numBytes) { + double bestScoreTillHere = bestPathNodes[bytePos] == null ? 
0 : bestPathNodes[bytePos].score; + int mblen = numUtf8Bytes(inputSequence.charAt(charPos)); + boolean hasSingleNode = false; + // Find the matching prefixes, incrementing by the chars, each time + for (BytesRef prefix : vocabTrie.matchingPrefixes(new BytesRef(normalizedByteBuffer, bytePos, numBytes - bytePos))) { + int pathKey = bytePos + prefix.length; + int tokenId = vocabToId.get(prefix); + double score = vocabScores[tokenId]; + BestPathNode node = bestPathNodes[pathKey]; + double candidateScore = score + bestScoreTillHere; + if (node == null || candidateScore > node.score) { + if (node == null) { + node = new BestPathNode(); + bestPathNodes[pathKey] = node; + } + node.id = tokenId; + node.score = candidateScore; + node.startsAtBytePos = bytePos; + node.startsAtCharPos = charPos; + } + hasSingleNode = hasSingleNode || (pathKey - bytePos) == mblen; + } + if (hasSingleNode == false) { + BestPathNode node = bestPathNodes[bytePos + mblen]; + double candidateScore = unkScore + bestScoreTillHere; + if (node == null || candidateScore > node.score) { + if (node == null) { + node = new BestPathNode(); + bestPathNodes[bytePos + mblen] = node; + } + node.id = unknownTokenId; + node.score = candidateScore; + node.startsAtBytePos = bytePos; + node.startsAtCharPos = charPos; + } + } + // Move our prefix search to the next char + bytePos += mblen; + ++charPos; + } + int endsAtBytes = numBytes; + int endsAtChars = inputSequence.length(); + List unknownTokens = new ArrayList<>(); + List results = new ArrayList<>(); + // Now we work our way backwards finding the best path nodes, using the `startAtBytePos` as backward links. + while (endsAtBytes > 0) { + BestPathNode node = bestPathNodes[endsAtBytes]; + int startsAtBytes = node.startsAtBytePos; + if (node.id == unknownTokenId && fuseUnk) { + unknownTokens.add( + new DelimitedToken.Encoded( + new String(normalizedByteBuffer, startsAtBytes, endsAtBytes - startsAtBytes, StandardCharsets.UTF_8), + unknownTokenId, + offsetCorrection.apply(node.startsAtCharPos), + offsetCorrection.apply(endsAtChars) + ) + ); + } else { + if (unknownTokens.isEmpty() == false) { + Collections.reverse(unknownTokens); + results.add(DelimitedToken.Encoded.mergeEncodedTokens(unknownTokens)); + unknownTokens.clear(); + } + results.add( + new DelimitedToken.Encoded( + new String(normalizedByteBuffer, startsAtBytes, endsAtBytes - startsAtBytes, StandardCharsets.UTF_8), + node.id, + offsetCorrection.apply(node.startsAtCharPos), + offsetCorrection.apply(endsAtChars) + ) + ); + } + endsAtBytes = startsAtBytes; + endsAtChars = node.startsAtCharPos; + } + if (unknownTokens.isEmpty() == false) { + Collections.reverse(unknownTokens); + results.add(DelimitedToken.Encoded.mergeEncodedTokens(unknownTokens)); + unknownTokens.clear(); + } + Collections.reverse(results); + return results; + } + + private static byte fromBytesRef(BytesRef bytesRef, int index) { + return bytesRef.bytes[index + bytesRef.offset]; + } + + /** + * This is a bytes-trie, this is used for gathering known matching prefixes given the original vocabulary. + * + * NOTE: it is possible for a node to be a "leaf" and have children. It being a "leaf", just means that it is the end of a possible + * vocab entry that matches a given prefix. 
+ */ + static class BytesTrie { + private final Map children; + private boolean isLeaf; + + BytesTrie() { + children = new HashMap<>(); + } + + private void setLeaf(boolean isLeaf) { + this.isLeaf = isLeaf; + } + + private boolean isLeaf() { + return isLeaf; + } + + List matchingPrefixes(BytesRef input) { + List prefixes = new ArrayList<>(); + int numMatchedChildren = 0; + BytesTrie node = this; + for (int i = input.offset; i < input.length + input.offset; i++) { + if (node == null) { + break; + } + if (node.isLeaf() && numMatchedChildren > 0) { + prefixes.add(new BytesRef(input.bytes, input.offset, numMatchedChildren)); + } + node = node.children.get(input.bytes[i]); + numMatchedChildren++; + } + if (node != null && node.isLeaf() && numMatchedChildren > 0) { + prefixes.add(new BytesRef(input.bytes, input.offset, numMatchedChildren)); + } + return prefixes; + } + + void insert(BytesRef bytes) { + if (bytes.length == 0) { + return; + } + BytesTrie currentNode = this; + int currentTokenIndex = 0; + + // find last child + while (currentTokenIndex < bytes.length) { + currentNode = currentNode.children.computeIfAbsent(fromBytesRef(bytes, currentTokenIndex), k -> new BytesTrie()); + currentTokenIndex++; + } + currentNode.setLeaf(true); + } + + public static BytesTrie build(Collection tokens) { + BytesTrie root = new BytesTrie(); + for (BytesRef token : tokens) { + root.insert(token); + } + return root; + } + } + + /** + * This keeps track of the best-path in the vocab for given prefixes + */ + private static class BestPathNode { + // Token Id, -1 if its unknown + private int id = -1; + // Token score + double score = 0.0; + // starts at byte position for walking back the best scoring node + private int startsAtBytePos = -1; + // Its char position for correctly identifying offsets related to the original input + private int startsAtCharPos = -1; + } + + @FunctionalInterface + public interface IntToIntFunction { + int apply(int value); + } + + /** + * This is a simple whitespace tokenizer that generates whitespace delimited tokens from the input stream + * + * This is effectively the lucene WhitespaceTokenizer, slightly adjusted for our needs here. 
+ */ + class SimpleWhitespaceTokenizer { + private int offset = 0, bufferIndex = 0, dataLen = 0, finalOffset = 0; + private static final int IO_BUFFER_SIZE = 4096; + private final CharacterUtils.CharacterBuffer ioBuffer = CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE); + + void reset() { + bufferIndex = 0; + offset = 0; + dataLen = 0; + finalOffset = 0; + ioBuffer.reset(); + } + + @Nullable + DelimitedToken next() throws IOException { + int length = 0; + int start = -1; // this variable is always initialized + int end = -1; + char[] buffer = termAtt.buffer(); + while (true) { + if (bufferIndex >= dataLen) { + offset += dataLen; + CharacterUtils.fill(ioBuffer, input); // read supplementary char aware with CharacterUtils + if (ioBuffer.getLength() == 0) { + dataLen = 0; // so next offset += dataLen won't decrement offset + if (length > 0) { + break; + } else { + finalOffset = offset; + return null; + } + } + dataLen = ioBuffer.getLength(); + bufferIndex = 0; + } + // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based + // methods are gone + final int c = Character.codePointAt(ioBuffer.getBuffer(), bufferIndex, ioBuffer.getLength()); + final int charCount = Character.charCount(c); + bufferIndex += charCount; + if (Character.isWhitespace(c) == false) { // if it's a token char + if (length == 0) { // start of token + assert start == -1; + start = offset + bufferIndex - charCount; + end = start; + } else if (length >= buffer.length - 1) { // supplementary could run out of bounds? + // make sure a supplementary fits in the buffer + buffer = termAtt.resizeBuffer(2 + length); + } + end += charCount; + length += Character.toChars(c, buffer, length); + } else if (length > 0) { + break; + } + } + + termAtt.setLength(length); + assert start != -1; + return new DelimitedToken(termAtt, start, finalOffset = end); + } + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java index 8016ed2e02278..8541ccfb6c2cd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizerTests.java @@ -10,43 +10,58 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.OptionalInt; +import java.io.StringReader; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class PrecompiledCharMapNormalizerTests extends ESTestCase { public void testCommonPrefix() throws IOException { - PrecompiledCharMapNormalizer parsed = loadTestCharMap(); - OptionalInt local = parsed.commonPrefix("\uFB01".getBytes(StandardCharsets.UTF_8)); - assertThat(local.isPresent(), is(true)); - assertThat(local.getAsInt(), equalTo(2130)); - String transformed = parsed.normalize("\uFB01"); - assertThat(transformed, equalTo("fi")); - assertThat(parsed.normalize("𝔾"), equalTo("G")); - assertThat(parsed.normalize("\uD835\uDD60"), equalTo("o")); - assertThat(parsed.normalize("\u200D"), equalTo(" ")); - assertThat(parsed.normalize("เขาไม่ได้พูดสักคำ"), equalTo("เขาไม\u0E48ได\u0E49พ\u0E39ดส\u0E31กค\u0E4Dา")); + PrecompiledCharMapNormalizer.Config parsed = loadTestCharMap(); + assertNormalization("\u0008", parsed, ""); + 
assertNormalization("\uFB01", parsed, "fi"); + assertNormalization("𝔾", parsed, "G"); + assertNormalization("\uD835\uDD60", parsed, "o"); + assertNormalization("\u200D", parsed, " "); + assertNormalization("เขาไม่ได้พูดสักคำ", parsed, "เขาไม\u0E48ได\u0E49พ\u0E39ดส\u0E31กค\u0E4Dา"); } public void testAdverseScenario() throws IOException { - PrecompiledCharMapNormalizer parsed = loadTestCharMap(); - assertThat(parsed.normalize("คำ"), equalTo("ค\u0e4dา")); + PrecompiledCharMapNormalizer.Config parsed = loadTestCharMap(); + assertNormalization("คำ", parsed, "ค\u0e4dา"); } public void testAdverseScenarioHindi() throws IOException { - PrecompiledCharMapNormalizer parsed = loadTestCharMap(); - assertThat(parsed.normalize("ड़ी दुख"), equalTo("ड\u093cी द\u0941ख")); + PrecompiledCharMapNormalizer.Config parsed = loadTestCharMap(); + assertNormalization("ड़ी दुख", parsed, "ड\u093cी द\u0941ख"); } public void testTwoCharUnicode() throws IOException { - PrecompiledCharMapNormalizer parsed = loadTestCharMap(); - assertThat(parsed.normalize("آ"), equalTo("آ")); + PrecompiledCharMapNormalizer.Config parsed = loadTestCharMap(); + assertNormalization("آ", parsed, "آ"); } - private static PrecompiledCharMapNormalizer loadTestCharMap() throws IOException { + public void testWhitespaceScenario() throws IOException { + PrecompiledCharMapNormalizer.Config parsed = loadTestCharMap(); + assertNormalization("​​από", parsed, " από"); + } + + private void assertNormalization(String input, PrecompiledCharMapNormalizer.Config config, String expected) throws IOException { + PrecompiledCharMapNormalizer normalizer = new PrecompiledCharMapNormalizer( + config.offsets(), + config.utf8str(), + new StringReader(input) + ); + char[] output = new char[64]; + int read = normalizer.read(output, 0, 64); + if (read <= 0) { + assertThat("", equalTo(expected)); + } else { + assertThat(new String(output, 0, read), equalTo(expected)); + } + } + + static PrecompiledCharMapNormalizer.Config loadTestCharMap() throws IOException { PreCompiledCharMap map = PreCompiledCharMap.fromResource( "/org.elasticsearch.xpack.ml.inference.nlp.tokenizers/precompiled_char_map.json" ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizerTests.java new file mode 100644 index 0000000000000..8f04ccf3dc0c2 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizerTests.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.nlp.tokenizers; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase; +import org.apache.lucene.util.BytesRef; + +import java.io.IOException; +import java.io.Reader; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.ml.inference.nlp.tokenizers.UnigramTokenizer.PREFIX; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; + +public class UnigramTokenizerTests extends BaseTokenStreamTestCase { + private static final String UNKNOWN_TOKEN = ""; + private static final List NEVER_SPLIT = List.of(""); + + public void testSimpleTokenization() throws IOException { + TestNLPAnalyzer analyzer = new TestNLPAnalyzer( + List.of(UNKNOWN_TOKEN, PREFIX + "a", "b", "c", "d", "cd", PREFIX + "ab", PREFIX + "abc", PREFIX + "abcd", ""), + List.of(0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 5.0, 10.0, 0.0), + UNKNOWN_TOKEN, + new PrecompiledCharMapNormalizer.Config(new int[0], "") + ); + + assertAnalyzesToNoCharFilter(analyzer, "", new String[0]); + assertAnalyzesToNoCharFilter(analyzer, "abcd", new String[] { PREFIX + "abcd" }); + } + + public void testLessSimpleTokenization() throws IOException { + TestNLPAnalyzer analyzer = new TestNLPAnalyzer( + List.of(UNKNOWN_TOKEN, PREFIX + "ab", "cd", PREFIX + "abc", "a", "b", "c", "ABC", "abcdabcd", "q", "r", "qr", ""), + List.of(0.0, 0.0, -0.1, -0.2, -0.3, -0.4, -0.5, -0.5, 20.0, 20.5, 20.5, -0.5, 0.0), + UNKNOWN_TOKEN, + new PrecompiledCharMapNormalizer.Config(new int[0], "") + ); + + assertAnalyzesToNoCharFilter(analyzer, "", new String[0]); + assertAnalyzesToNoCharFilter(analyzer, "abcd", new String[] { PREFIX + "ab", "cd" }); + assertAnalyzesToNoCharFilter(analyzer, "abc", new String[] { PREFIX + "abc" }); + assertAnalyzesToNoCharFilter(analyzer, "AB", new String[] { PREFIX + "AB" }); + assertAnalyzesToNoCharFilter(analyzer, "abcc", new String[] { PREFIX + "abc", "c" }); + assertAnalyzesToNoCharFilter(analyzer, " \nabcd \n\n abcc \n", new String[] { PREFIX + "ab", "cd", PREFIX + "abc", "c" }); + } + + public void testLessSimpleTokenizationWithNeverSplit() throws IOException { + TestNLPAnalyzer analyzer = new TestNLPAnalyzer( + List.of( + UNKNOWN_TOKEN, + PREFIX + "ab", + "cd", + PREFIX + "cd", + PREFIX + "abc", + "a", + "b", + "c", + "ABC", + "abcdabcd", + "q", + "r", + "qr", + "" + ), + List.of(0.0, 0.0, -0.1, -0.2, -0.2, -0.3, -0.4, -0.5, -0.5, 20.0, 20.5, 20.5, -0.5, 0.0), + UNKNOWN_TOKEN, + new PrecompiledCharMapNormalizer.Config(new int[0], "") + ); + + assertAnalyzesToNoCharFilter(analyzer, "", new String[] { "" }); + assertAnalyzesToNoCharFilter(analyzer, "abcd", new String[] { "", PREFIX + "ab", "cd", "" }); + assertAnalyzesToNoCharFilter( + analyzer, + " \nabcd \n\n abcc \n", + new String[] { "", PREFIX + "ab", "", PREFIX + "cd", PREFIX + "abc", "c", "" } + ); + } + + public void testTriePrefixMatch() { + List inputs = new ArrayList<>( + List.of( + new BytesRef("a"), + new BytesRef("b"), + new BytesRef("c"), + new BytesRef("d"), + new BytesRef("cd"), + new BytesRef("ab"), + new BytesRef("abc"), + new BytesRef("abcd") + ) + ); + Collections.shuffle(inputs, random()); + UnigramTokenizer.BytesTrie bytesTrie = UnigramTokenizer.BytesTrie.build(inputs); + String input = "abcd"; + assertThat( + bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), + contains("a", "ab", "abc", "abcd") + ); + input = "bcd"; + 
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("b")); + input = "cd"; + assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("c", "cd")); + input = "d"; + assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("d")); + input = ""; + assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), empty()); + input = "zabcd"; + assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), empty()); + input = "azbcd"; + assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("a")); + input = "abzcd"; + assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("a", "ab")); + input = "abcdz"; + assertThat( + bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), + contains("a", "ab", "abc", "abcd") + ); + } + + private static class TestNLPAnalyzer extends Analyzer { + private final List dictionary; + private final List scores; + private final String unknownToken; + private final PrecompiledCharMapNormalizer.Config normalizer; + + TestNLPAnalyzer(List dictionary, List scores, String unknownToken, PrecompiledCharMapNormalizer.Config normalizer) { + this.dictionary = dictionary; + this.scores = scores; + this.unknownToken = unknownToken; + this.normalizer = normalizer; + } + + @Override + protected Reader initReader(String fieldName, Reader reader) { + if (normalizer.offsets().length > 0) { + return new PrecompiledCharMapNormalizer(normalizer.offsets(), normalizer.utf8str(), reader); + } + return reader; + } + + @Override + protected TokenStreamComponents createComponents(String fieldName) { + UnigramTokenizer tokenizer = UnigramTokenizer.build(NEVER_SPLIT, dictionary, scores, unknownToken); + return new TokenStreamComponents(tokenizer); + } + } + + private static void assertAnalyzesToNoCharFilter(Analyzer a, String input, String[] output) throws IOException { + assertTokenStreamContents(a.tokenStream("dummy", input), output, null, null, null, null, null, input.length()); + checkResetException(a, input); + // We don't allow the random char filter because our offsets aren't corrected appropriately due to "never_split" + // If we could figure out a way to pass "never_split" through whichever passed char_filter there was, then it would work + checkAnalysisConsistency(random(), a, false, input); + } + +} From f32c8224b2352552c1800ef7e335790abf02f3b1 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 29 Jul 2022 09:20:15 -0700 Subject: [PATCH 005/265] Improve capturing of reaper failure logs --- .../src/main/groovy/elasticsearch.build-complete.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle index 50db02d9e21a1..32967f03c6879 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle @@ -23,7 +23,7 @@ if (buildNumber && performanceTest == null) { fileset(dir: projectDir) { Set fileSet = fileTree(projectDir) { include("**/*.hprof") - include("**/reaper.log") + include(".gradle/reaper/**") 
include("**/build/test-results/**/*.xml") include("**/build/testclusters/**") exclude("**/build/testclusters/**/data/**") From a3a5332309162d195ef45b4c29496eb819e642f6 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 29 Jul 2022 18:30:46 +0200 Subject: [PATCH 006/265] Make Settings Diffable (#88815) Making settings diffable so that index metadata diffs are smaller whenever the metadata changes without a setting change as well as to make index setting updates over a wider number of indices faster. This saves about 3% of CPU time on master and about half that on data nodes that is just burnt for writing setting strings when bootstrapping many shards benchmarks benchmarks to 50k indices. --- .../admin/cluster/node/info/NodeInfo.java | 2 +- .../put/PutRepositoryRequest.java | 3 +- .../settings/ClusterGetSettingsAction.java | 6 +- .../ClusterUpdateSettingsRequest.java | 5 +- .../ClusterUpdateSettingsResponse.java | 4 +- .../create/CreateSnapshotRequest.java | 3 +- .../restore/RestoreSnapshotRequest.java | 3 +- .../indices/create/CreateIndexRequest.java | 3 +- .../admin/indices/get/GetIndexResponse.java | 4 +- .../settings/get/GetSettingsResponse.java | 4 +- .../settings/put/UpdateSettingsRequest.java | 3 +- .../template/put/PutIndexTemplateRequest.java | 3 +- .../elasticsearch/bootstrap/ServerArgs.java | 2 +- .../elasticsearch/cluster/DiffableUtils.java | 25 +++++- .../cluster/metadata/DesiredNode.java | 2 +- .../cluster/metadata/IndexMetadata.java | 33 +++++-- .../metadata/IndexTemplateMetadata.java | 2 +- .../cluster/metadata/Metadata.java | 9 +- .../cluster/metadata/RepositoryMetadata.java | 2 +- .../cluster/metadata/Template.java | 2 +- .../common/settings/Settings.java | 85 +++++++++++++++---- .../index/analysis/NameOrDefinition.java | 2 +- .../common/settings/SettingsTests.java | 34 +++++++- .../action/PutAutoscalingPolicyAction.java | 2 +- .../autoscaling/policy/AutoscalingPolicy.java | 2 +- .../xpack/core/ccr/AutoFollowMetadata.java | 2 +- .../action/PutAutoFollowPatternAction.java | 2 +- .../core/ccr/action/PutFollowAction.java | 2 +- .../MountSearchableSnapshotRequest.java | 3 +- .../TransformDestIndexSettings.java | 2 +- .../xpack/ml/autoscaling/MlScalingReason.java | 2 +- 31 files changed, 188 insertions(+), 70 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 1f323bfbf9766..b803a627207da 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -183,7 +183,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } else { out.writeBoolean(true); - Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); } out.writeOptionalWriteable(getInfo(OsInfo.class)); out.writeOptionalWriteable(getInfo(ProcessInfo.class)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 00acf3d04b83d..4402519c4eacd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -22,7 +22,6 @@ import static 
org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; /** * Register repository request. @@ -207,7 +206,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(name); out.writeString(type); - writeSettingsToStream(settings, out); + settings.writeTo(out); out.writeBoolean(verify); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java index 71767728e57bc..15af362fe4a46 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java @@ -92,9 +92,9 @@ public Response(Settings persistentSettings, Settings transientSettings, Setting @Override public void writeTo(StreamOutput out) throws IOException { assert out.getVersion().onOrAfter(Version.V_8_3_0); - Settings.writeSettingsToStream(persistentSettings, out); - Settings.writeSettingsToStream(transientSettings, out); - Settings.writeSettingsToStream(settings, out); + persistentSettings.writeTo(out); + transientSettings.writeTo(out); + settings.writeTo(out); } public Settings persistentSettings() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 359f9fe976862..23348716ffcca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -25,7 +25,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; /** * Request for an update cluster settings action @@ -162,8 +161,8 @@ public ClusterUpdateSettingsRequest persistentSettings(Map source) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - writeSettingsToStream(transientSettings, out); - writeSettingsToStream(persistentSettings, out); + transientSettings.writeTo(out); + persistentSettings.writeTo(out); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index d661f2a1e8bbd..0891de0c5f970 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -67,8 +67,8 @@ public Settings getPersistentSettings() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - Settings.writeSettingsToStream(transientSettings, out); - Settings.writeSettingsToStream(persistentSettings, out); + transientSettings.writeTo(out); + persistentSettings.writeTo(out); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 251bf6fb17645..04acdb7a3fa40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -33,7 +33,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.Strings.EMPTY_ARRAY; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** @@ -115,7 +114,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); if (out.getVersion().before(SETTINGS_IN_REQUEST_VERSION)) { - writeSettingsToStream(Settings.EMPTY, out); + Settings.EMPTY.writeTo(out); } out.writeStringArray(featureStates); out.writeBoolean(includeGlobalState); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index c383b2d610e5f..a561ac48ed793 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -30,7 +30,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** @@ -113,7 +112,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(VERSION_SUPPORTING_QUIET_PARAMETER)) { out.writeBoolean(quiet); } - writeSettingsToStream(indexSettings, out); + indexSettings.writeTo(out); out.writeStringArray(ignoreIndexSettings); out.writeOptionalString(snapshotUuid); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index aa65352e50468..985115cca3776 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -43,7 +43,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; /** * A request to create an index. Best created with {@link org.elasticsearch.client.internal.Requests#createIndexRequest(String)}. 
@@ -453,7 +452,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(cause); out.writeString(index); - writeSettingsToStream(settings, out); + settings.writeTo(out); if (out.getVersion().before(Version.V_8_0_0)) { if ("{}".equals(mappings)) { out.writeVInt(0); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 5d9ede276bd3b..4f8f24ea5b72f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -172,8 +172,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); MappingMetadata.writeMappingMetadata(out, mappings); out.writeMap(aliases, StreamOutput::writeString, StreamOutput::writeList); - out.writeMap(settings, StreamOutput::writeString, (o, v) -> Settings.writeSettingsToStream(v, o)); - out.writeMap(defaultSettings, StreamOutput::writeString, (o, v) -> Settings.writeSettingsToStream(v, o)); + out.writeMap(settings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(defaultSettings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeMap(dataStreams, StreamOutput::writeString, StreamOutput::writeOptionalString); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java index ae90b405b9106..0305f123bba11 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -90,8 +90,8 @@ public String getSetting(String index, String setting) { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(indexToSettings, StreamOutput::writeString, (o, s) -> Settings.writeSettingsToStream(s, o)); - out.writeMap(indexToDefaultSettings, StreamOutput::writeString, (o, s) -> Settings.writeSettingsToStream(s, o)); + out.writeMap(indexToSettings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(indexToDefaultSettings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); } private static void parseSettingsField( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index f4fe5224927af..6e443744b8835 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -31,7 +31,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; /** * Request for an update index settings action @@ -182,7 +181,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArrayNullable(indices); indicesOptions.writeIndicesOptions(out); - writeSettingsToStream(settings, out); + settings.writeTo(out); out.writeBoolean(preserveExisting); if 
(out.getVersion().onOrAfter(Version.V_7_12_0)) { out.writeString(origin); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 66deeaa64df36..a332b57cd475f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -43,7 +43,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; /** * A request to create an index template. @@ -446,7 +445,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(indexPatterns); out.writeInt(order); out.writeBoolean(create); - writeSettingsToStream(settings, out); + settings.writeTo(out); if (out.getVersion().before(Version.V_8_0_0)) { out.writeVInt(mappings == null ? 0 : 1); if (mappings != null) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java b/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java index cc67a0b742d80..c324370573fce 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java @@ -82,7 +82,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(quiet); out.writeOptionalString(pidFile == null ? null : pidFile.toString()); out.writeSecureString(keystorePassword); - Settings.writeSettingsToStream(nodeSettings, out); + nodeSettings.writeTo(out); out.writeString(configDir.toString()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index f5fb4e797aa58..bcb9222e384ae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -153,7 +153,7 @@ private static > MapDiff createDiff( inserts++; } else if (entry.getValue().equals(previousValue) == false) { if (valueSerializer.supportsDiffableValues()) { - diffs.add(Map.entry(entry.getKey(), valueSerializer.diff(entry.getValue(), previousValue))); + diffs.add(mapEntry(entry.getKey(), valueSerializer.diff(entry.getValue(), previousValue))); } else { upserts.add(entry); } @@ -307,14 +307,14 @@ private MapDiff( for (int i = 0; i < diffsCount; i++) { K key = keySerializer.readKey(in); Diff diff = valueSerializer.readDiff(in, key); - diffs.add(Map.entry(key, diff)); + diffs.add(mapEntry(key, diff)); } int upsertsCount = in.readVInt(); upserts = upsertsCount == 0 ? 
List.of() : new ArrayList<>(upsertsCount); for (int i = 0; i < upsertsCount; i++) { K key = keySerializer.readKey(in); T newValue = valueSerializer.read(in, key); - upserts.add(Map.entry(key, newValue)); + upserts.add(mapEntry(key, newValue)); } this.builderCtor = builderCtor; } @@ -402,6 +402,25 @@ public void writeTo(StreamOutput out) throws IOException { } } + private static Map.Entry mapEntry(K key, T newValue) { + return new Map.Entry<>() { + @Override + public K getKey() { + return key; + } + + @Override + public T getValue() { + return newValue; + } + + @Override + public T setValue(T value) { + throw new UnsupportedOperationException(); + } + }; + } + /** * Provides read and write operations to serialize keys of map * @param type of key diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index cf8440f569527..1cb1552e5afa3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -188,7 +188,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); if (out.getVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { out.writeOptionalFloat(processors); out.writeOptionalWriteable(processorsRange); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 032a5acc90c90..412f4cc3ae475 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -77,7 +77,6 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.validateIpValue; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY; public class IndexMetadata implements Diffable, ToXContentFragment { @@ -1314,6 +1313,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private static final Version SETTING_DIFF_VERSION = Version.V_8_5_0; + private static class IndexMetadataDiff implements Diff { private final String index; @@ -1324,7 +1325,12 @@ private static class IndexMetadataDiff implements Diff { private final long aliasesVersion; private final long[] primaryTerms; private final State state; + + // used for BwC when this instance was written by an older version node that does not diff settings yet + @Nullable private final Settings settings; + @Nullable + private final Diff settingsDiff; private final Diff> mappings; private final Diff> aliases; private final Diff> customData; @@ -1342,6 +1348,7 @@ private static class IndexMetadataDiff implements Diff { routingNumShards = after.routingNumShards; state = after.state; settings = after.settings; + settingsDiff = after.settings.diff(before.settings); primaryTerms = after.primaryTerms; // TODO: find a nicer way to do BwC here and just work with Diff here and in networking mappings = DiffableUtils.diff( @@ -1387,7 +1394,13 @@ private 
static class IndexMetadataDiff implements Diff { aliasesVersion = 1; } state = State.fromId(in.readByte()); - settings = Settings.readSettingsFromStream(in); + if (in.getVersion().onOrAfter(SETTING_DIFF_VERSION)) { + settings = null; + settingsDiff = Settings.readSettingsDiffFromStream(in); + } else { + settings = Settings.readSettingsFromStream(in); + settingsDiff = null; + } primaryTerms = in.readVLongArray(); mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MAPPING_DIFF_VALUE_READER); aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), ALIAS_METADATA_DIFF_VALUE_READER); @@ -1421,7 +1434,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(aliasesVersion); } out.writeByte(state.id); - Settings.writeSettingsToStream(settings, out); + assert settings != null + : "settings should always be non-null since this instance is not expected to have been read from another node"; + if (out.getVersion().onOrAfter(SETTING_DIFF_VERSION)) { + settingsDiff.writeTo(out); + } else { + settings.writeTo(out); + } out.writeVLongArray(primaryTerms); mappings.writeTo(out); aliases.writeTo(out); @@ -1443,7 +1462,11 @@ public IndexMetadata apply(IndexMetadata part) { builder.aliasesVersion(aliasesVersion); builder.setRoutingNumShards(routingNumShards); builder.state(state); - builder.settings(settings); + if (settingsDiff == null) { + builder.settings(settings); + } else { + builder.settings(settingsDiff.apply(part.settings)); + } builder.primaryTerms(primaryTerms); builder.mapping = mappings.apply( ImmutableOpenMap.builder(1).fPut(MapperService.SINGLE_MAPPING_NAME, part.mapping).build() @@ -1531,7 +1554,7 @@ public void writeTo(StreamOutput out, boolean mappingsAsHash) throws IOException } out.writeInt(routingNumShards); out.writeByte(state.id()); - writeSettingsToStream(settings, out); + settings.writeTo(out); out.writeVLongArray(primaryTerms); // TODO: adjust serialization format to using an optional writable if (mapping == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java index 4e9cfa5a5083c..551cda35eb753 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java @@ -206,7 +206,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeInt(order); out.writeStringCollection(patterns); - Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); out.writeMap(mappings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeCollection(aliases.values()); out.writeOptionalVInt(version); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 506581c7ad5cf..dbbbba70ee067 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -81,7 +81,6 @@ import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; /** * {@link Metadata} is the part of the {@link ClusterState} which persists across 
restarts. This persistence is XContent-based, so a @@ -1228,8 +1227,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(clusterUUIDCommitted); out.writeLong(version); coordinationMetadata.writeTo(out); - Settings.writeSettingsToStream(transientSettings, out); - Settings.writeSettingsToStream(persistentSettings, out); + transientSettings.writeTo(out); + persistentSettings.writeTo(out); if (out.getVersion().onOrAfter(Version.V_7_3_0)) { hashesOfConsistentSettings.writeTo(out); } @@ -1314,8 +1313,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(clusterUUID); out.writeBoolean(clusterUUIDCommitted); coordinationMetadata.writeTo(out); - writeSettingsToStream(transientSettings, out); - writeSettingsToStream(persistentSettings, out); + transientSettings.writeTo(out); + persistentSettings.writeTo(out); if (out.getVersion().onOrAfter(Version.V_7_3_0)) { hashesOfConsistentSettings.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java index de40fc641e710..27bdd94a231f4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java @@ -152,7 +152,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(uuid); } out.writeString(type); - Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); out.writeLong(generation); out.writeLong(pendingGeneration); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index 84a5f4c8f6d5e..89115dfe9e51c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -126,7 +126,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } else { out.writeBoolean(true); - Settings.writeSettingsToStream(this.settings, out); + this.settings.writeTo(out); } if (this.mappings == null) { out.writeBoolean(false); diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 5260dabee66ab..3ae5994ed1ac2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -13,10 +13,14 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -71,7 +75,7 @@ /** * An immutable settings implementation. 
*/ -public final class Settings implements ToXContentFragment { +public final class Settings implements ToXContentFragment, Writeable, Diffable { public static final Settings EMPTY = new Settings(Map.of(), null); @@ -598,21 +602,54 @@ public static Settings readSettingsFromStream(StreamInput in) throws IOException return builder.build(); } - public static void writeSettingsToStream(Settings settings, StreamOutput out) throws IOException { - // pull settings to exclude secure settings in size() - out.writeMap(settings.settings, StreamOutput::writeString, (streamOutput, value) -> { - if (value instanceof String) { - streamOutput.writeGenericString((String) value); - } else if (value instanceof List) { - @SuppressWarnings("unchecked") - // exploit the fact that we know all lists to be string lists - final List stringList = (List) value; - streamOutput.writeGenericList(stringList, StreamOutput::writeGenericString); - } else { - assert value == null : "unexpected value [" + value + "]"; - streamOutput.writeGenericNull(); + private static final DiffableUtils.ValueSerializer DIFF_VALUE_SERIALIZER = + new DiffableUtils.NonDiffableValueSerializer<>() { + @Override + public void write(Object value, StreamOutput out) throws IOException { + writeSettingValue(out, value); + } + + @Override + public Object read(StreamInput in, String key) throws IOException { + return in.readGenericValue(); } - }); + }; + + public static Diff readSettingsDiffFromStream(StreamInput in) throws IOException { + return new SettingsDiff(DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), DIFF_VALUE_SERIALIZER)); + } + + @Override + public Diff diff(Settings previousState) { + final DiffableUtils.MapDiff> mapDiff = DiffableUtils.diff( + previousState.settings, + settings, + DiffableUtils.getStringKeySerializer(), + DIFF_VALUE_SERIALIZER + ); + return new SettingsDiff(mapDiff); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // pull settings to exclude secure settings in size() + out.writeMap(settings, StreamOutput::writeString, Settings::writeSettingValue); + } + + private static void writeSettingValue(StreamOutput streamOutput, Object value) throws IOException { + // we only have strings, lists of strings or null values so as an optimization we can dispatch those directly instead of going + // through the much slower StreamOutput#writeGenericValue that would write the same format + if (value instanceof String) { + streamOutput.writeGenericString((String) value); + } else if (value instanceof List) { + @SuppressWarnings("unchecked") + // exploit the fact that we know all lists to be string lists + final List stringList = (List) value; + streamOutput.writeGenericList(stringList, StreamOutput::writeGenericString); + } else { + assert value == null : "unexpected value [" + value + "]"; + streamOutput.writeGenericNull(); + } } /** @@ -1517,4 +1554,22 @@ private static String toString(Object o) { static String internKeyOrValue(String s) { return settingLiteralDeduplicator.deduplicate(s); } + + private record SettingsDiff(DiffableUtils.MapDiff> mapDiff) implements Diff { + + @Override + public Settings apply(Settings part) { + final var updated = mapDiff.apply(part.settings); + if (updated == part.settings) { + // noop map diff, no change to the settings + return part; + } + return Settings.of(updated, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + mapDiff.writeTo(out); + } + } } diff --git 
a/server/src/main/java/org/elasticsearch/index/analysis/NameOrDefinition.java b/server/src/main/java/org/elasticsearch/index/analysis/NameOrDefinition.java index e4e68c2f673c3..ca6fb800b6cb7 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/NameOrDefinition.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/NameOrDefinition.java @@ -56,7 +56,7 @@ public void writeTo(StreamOutput out) throws IOException { boolean isNotNullDefinition = this.definition != null; out.writeBoolean(isNotNullDefinition); if (isNotNullDefinition) { - Settings.writeSettingsToStream(definition, out); + definition.writeTo(out); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index 64b7153d2a4ff..2cc11e68f9c5e 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -31,6 +32,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; @@ -431,7 +433,7 @@ public void testWriteSettingsToStream() throws IOException { builder.putList("test.key4.foo", "1", "2"); builder.setSecureSettings(secureSettings); assertEquals(7, builder.build().size()); - Settings.writeSettingsToStream(builder.build(), out); + builder.build().writeTo(out); StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); Settings settings = Settings.readSettingsFromStream(in); assertEquals(3, settings.size()); @@ -441,6 +443,34 @@ public void testWriteSettingsToStream() throws IOException { assertEquals(Arrays.asList("1", "2"), settings.getAsList("test.key4.foo")); } + public void testDiff() throws IOException { + final Settings before = Settings.builder().put("foo", "bar").put("setting", "value").build(); + { + final Settings after = Settings.builder() + .put("foo", "bar") + .putNull("null_setting") + .putList("list_setting", List.of("a", "bbb", "ccc")) + .put("added_setting", "added") + .build(); + final Diff diff = after.diff(before); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + final Diff diffRead = Settings.readSettingsDiffFromStream(out.bytes().streamInput()); + final Settings afterFromDiff = diffRead.apply(before); + assertEquals(after, afterFromDiff); + } + + { + final Settings afterSameAsBefore = Settings.builder().put(before).build(); + final Diff diff = afterSameAsBefore.diff(before); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + final Diff diffRead = Settings.readSettingsDiffFromStream(out.bytes().streamInput()); + assertSame(before, diff.apply(before)); + assertSame(before, diffRead.apply(before)); + } + } + public void testSecureSettingConflict() { Setting setting = SecureSetting.secureString("something.secure", null); Settings settings = Settings.builder().put("something.secure", "notreallysecure").build(); @@ -596,7 +626,7 @@ public void testReadWriteArray() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(randomFrom(Version.CURRENT, 
Version.V_7_0_0)); Settings settings = Settings.builder().putList("foo.bar", "0", "1", "2", "3").put("foo.bar.baz", "baz").build(); - Settings.writeSettingsToStream(settings, output); + settings.writeTo(output); StreamInput in = StreamInput.wrap(BytesReference.toBytes(output.bytes())); Settings build = Settings.readSettingsFromStream(in); assertEquals(2, build.size()); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java index 64b29e1d83eaa..3a8f09f021d1b 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java @@ -118,7 +118,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeInt(deciders.size()); for (Map.Entry entry : deciders.entrySet()) { out.writeString(entry.getKey()); - Settings.writeSettingsToStream(entry.getValue(), out); + entry.getValue().writeTo(out); } } else { out.writeBoolean(false); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java index a2c31c124c747..64a50687acba1 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java @@ -103,7 +103,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeInt(deciders.size()); for (Map.Entry entry : deciders.entrySet()) { out.writeString(entry.getKey()); - Settings.writeSettingsToStream(entry.getValue(), out); + entry.getValue().writeTo(out); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index fb127b2547883..d74be00ef18f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -367,7 +367,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexPattern); if (out.getVersion().onOrAfter(Version.V_7_9_0)) { - Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); } super.writeTo(out); if (out.getVersion().onOrAfter(Version.V_7_5_0)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index a3154cfec7925..a7080131001ea 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -202,7 +202,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); if (out.getVersion().onOrAfter(Version.V_7_9_0)) { - 
Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); } parameters.writeTo(out); if (out.getVersion().onOrAfter(Version.V_7_14_0)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index cf4846b761041..910cf956c5dac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -188,7 +188,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(leaderIndex); out.writeString(followerIndex); if (out.getVersion().onOrAfter(Version.V_7_9_0)) { - Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); } parameters.writeTo(out); waitForActiveShards.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java index 0c2a28e576556..88bad8d1e20db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java @@ -31,7 +31,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; -import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -129,7 +128,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(repositoryName); out.writeString(snapshotName); out.writeString(snapshotIndexName); - writeSettingsToStream(indexSettings, out); + indexSettings.writeTo(out); out.writeStringArray(ignoredIndexSettings); out.writeBoolean(waitForCompletion); if (out.getVersion().onOrAfter(SHARED_CACHE_VERSION)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java index adda7102d6393..c967c2177a819 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java @@ -118,7 +118,7 @@ public static TransformDestIndexSettings fromXContent(final XContentParser parse @Override public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(mappings); - Settings.writeSettingsToStream(settings, out); + settings.writeTo(out); out.writeCollection(aliases); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java index c6d2962b3a3d1..c42f3dc83e52f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java @@ 
-139,7 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_8_0_0)) { out.writeStringCollection(this.waitingModels); } - Settings.writeSettingsToStream(this.passedConfiguration, out); + this.passedConfiguration.writeTo(out); this.currentMlCapacity.writeTo(out); out.writeOptionalWriteable(this.requiredCapacity); out.writeOptionalVLong(largestWaitingAnalyticsJob); From 4e3b71b6af8408b74577b7a869f818a7418f730b Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Fri, 29 Jul 2022 17:54:33 +0100 Subject: [PATCH 007/265] Ensure that the extended socket options TCP_KEEPXXX are available (#88935) --- .../server/cli/ServerProcess.java | 1 + docs/changelog/88935.yaml | 6 ++ .../src/main/java/module-info.java | 1 + .../netty4/Netty4HttpServerTransport.java | 28 ++++------ .../transport/netty4/NetUtils.java | 56 +++++++++---------- .../transport/netty4/Netty4Transport.java | 41 +++++--------- .../transport/netty4/NetUtilsTests.java | 33 +++++++++-- .../netty4/SimpleNetty4TransportTests.java | 8 +-- 8 files changed, 91 insertions(+), 83 deletions(-) create mode 100644 docs/changelog/88935.yaml diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java index ecb5bd89a694f..59152f0550f89 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java @@ -216,6 +216,7 @@ private static Process createProcess( command.addAll(jvmOptions); command.add("--module-path"); command.add(esHome.resolve("lib").toString()); + command.add("--add-modules=jdk.net"); // very special circumstance; explicit modules should typically not be added here command.add("-m"); command.add("org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch"); diff --git a/docs/changelog/88935.yaml b/docs/changelog/88935.yaml new file mode 100644 index 0000000000000..f81b4838a4aa1 --- /dev/null +++ b/docs/changelog/88935.yaml @@ -0,0 +1,6 @@ +pr: 88935 +summary: Ensure that the extended socket options TCP_KEEPXXX are available +area: Network +type: bug +issues: + - 88897 diff --git a/modules/transport-netty4/src/main/java/module-info.java b/modules/transport-netty4/src/main/java/module-info.java index cb718539d0f11..92217b419c666 100644 --- a/modules/transport-netty4/src/main/java/module-info.java +++ b/modules/transport-netty4/src/main/java/module-info.java @@ -7,6 +7,7 @@ */ module org.elasticsearch.transport.netty4 { + requires jdk.net; requires org.elasticsearch.base; requires org.elasticsearch.server; requires org.elasticsearch.xcontent; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 7fa8ca28aa31b..5f49e2505cbf6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -57,7 +57,6 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import java.net.InetSocketAddress; -import java.net.SocketOption; import java.util.concurrent.TimeUnit; import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; @@ -215,25 +214,22 @@ protected void doStart() { // Netty logs a warning if it can't set the option, so try this only on supported platforms if (IOUtils.LINUX || IOUtils.MAC_OS_X) { if (SETTING_HTTP_TCP_KEEP_IDLE.get(settings) >= 0) { - final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); - if (keepIdleOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIdleOption), SETTING_HTTP_TCP_KEEP_IDLE.get(settings)); - } + serverBootstrap.childOption( + NioChannelOption.of(NetUtils.getTcpKeepIdleSocketOption()), + SETTING_HTTP_TCP_KEEP_IDLE.get(settings) + ); } if (SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) >= 0) { - final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); - if (keepIntervalOption != null) { - serverBootstrap.childOption( - NioChannelOption.of(keepIntervalOption), - SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) - ); - } + serverBootstrap.childOption( + NioChannelOption.of(NetUtils.getTcpKeepIntervalSocketOption()), + SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) + ); } if (SETTING_HTTP_TCP_KEEP_COUNT.get(settings) >= 0) { - final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); - if (keepCountOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepCountOption), SETTING_HTTP_TCP_KEEP_COUNT.get(settings)); - } + serverBootstrap.childOption( + NioChannelOption.of(NetUtils.getTcpKeepCountSocketOption()), + SETTING_HTTP_TCP_KEEP_COUNT.get(settings) + ); } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java index ffd423a7b092a..6c93b6036578d 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java @@ -8,12 +8,15 @@ package org.elasticsearch.transport.netty4; +import jdk.net.ExtendedSocketOptions; + +import org.elasticsearch.core.SuppressForbidden; + import java.io.IOException; -import java.lang.reflect.Field; import java.net.SocketOption; import java.net.StandardSocketOptions; import java.nio.channels.NetworkChannel; -import java.util.Arrays; +import java.util.Objects; /** * Utilities for network-related methods. @@ -22,37 +25,31 @@ public class NetUtils { private NetUtils() {} + // Accessors to the extended socket options reduce the proliferation of the non-portable + // ExtendedSocketOptions type. + /** - * Returns the extended TCP_KEEPIDLE socket option, if available on this JDK + * Returns the extended TCP_KEEPIDLE socket option. */ - public static SocketOption getTcpKeepIdleSocketOptionOrNull() { - return getExtendedSocketOptionOrNull("TCP_KEEPIDLE"); + @SuppressForbidden(reason = "access to non-portable socket option required") + public static SocketOption getTcpKeepIdleSocketOption() { + return ExtendedSocketOptions.TCP_KEEPIDLE; } /** - * Returns the extended TCP_KEEPINTERVAL socket option, if available on this JDK + * Returns the extended TCP_KEEPINTERVAL socket option. 
*/ - public static SocketOption getTcpKeepIntervalSocketOptionOrNull() { - return getExtendedSocketOptionOrNull("TCP_KEEPINTERVAL"); + @SuppressForbidden(reason = "access to non-portable socket option required") + public static SocketOption getTcpKeepIntervalSocketOption() { + return ExtendedSocketOptions.TCP_KEEPINTERVAL; } /** - * Returns the extended TCP_KEEPCOUNT socket option, if available on this JDK + * Returns the extended TCP_KEEPCOUNT socket option. */ - public static SocketOption getTcpKeepCountSocketOptionOrNull() { - return getExtendedSocketOptionOrNull("TCP_KEEPCOUNT"); - } - - @SuppressWarnings("unchecked") - private static SocketOption getExtendedSocketOptionOrNull(String fieldName) { - try { - final Class extendedSocketOptionsClass = Class.forName("jdk.net.ExtendedSocketOptions"); - final Field field = extendedSocketOptionsClass.getField(fieldName); - return (SocketOption) field.get(null); - } catch (Exception t) { - // ignore - return null; - } + @SuppressForbidden(reason = "access to non-portable socket option required") + public static SocketOption getTcpKeepCountSocketOption() { + return ExtendedSocketOptions.TCP_KEEPCOUNT; } /** @@ -67,13 +64,9 @@ public static void tryEnsureReasonableKeepAliveConfig(NetworkChannel socketChann if (socketChannel.supportedOptions().contains(StandardSocketOptions.SO_KEEPALIVE)) { final Boolean keepalive = socketChannel.getOption(StandardSocketOptions.SO_KEEPALIVE); assert keepalive != null; - if (keepalive.booleanValue()) { - for (SocketOption option : Arrays.asList( - NetUtils.getTcpKeepIdleSocketOptionOrNull(), - NetUtils.getTcpKeepIntervalSocketOptionOrNull() - )) { - setMinValueForSocketOption(socketChannel, option, 300); - } + if (keepalive) { + setMinValueForSocketOption(socketChannel, getTcpKeepIdleSocketOption(), 300); + setMinValueForSocketOption(socketChannel, getTcpKeepIntervalSocketOption(), 300); } } } catch (Exception e) { @@ -84,7 +77,8 @@ public static void tryEnsureReasonableKeepAliveConfig(NetworkChannel socketChann } private static void setMinValueForSocketOption(NetworkChannel socketChannel, SocketOption option, int minValue) { - if (option != null && socketChannel.supportedOptions().contains(option)) { + Objects.requireNonNull(option); + if (socketChannel.supportedOptions().contains(option)) { try { final Integer currentIdleVal = socketChannel.getOption(option); assert currentIdleVal != null; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 37c87d19e811f..0241669d15b8e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -47,7 +47,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.net.SocketOption; import java.util.Map; import static org.elasticsearch.common.settings.Setting.byteSizeSetting; @@ -165,22 +164,19 @@ private Bootstrap createClientBootstrap(SharedGroupFactory.SharedGroup sharedGro if (TransportSettings.TCP_KEEP_ALIVE.get(settings)) { // Note that Netty logs a warning if it can't set the option if (TransportSettings.TCP_KEEP_IDLE.get(settings) >= 0) { - final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); - if (keepIdleOption != null) { - bootstrap.option(NioChannelOption.of(keepIdleOption), 
TransportSettings.TCP_KEEP_IDLE.get(settings)); - } + bootstrap.option(NioChannelOption.of(NetUtils.getTcpKeepIdleSocketOption()), TransportSettings.TCP_KEEP_IDLE.get(settings)); } if (TransportSettings.TCP_KEEP_INTERVAL.get(settings) >= 0) { - final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); - if (keepIntervalOption != null) { - bootstrap.option(NioChannelOption.of(keepIntervalOption), TransportSettings.TCP_KEEP_INTERVAL.get(settings)); - } + bootstrap.option( + NioChannelOption.of(NetUtils.getTcpKeepIntervalSocketOption()), + TransportSettings.TCP_KEEP_INTERVAL.get(settings) + ); } if (TransportSettings.TCP_KEEP_COUNT.get(settings) >= 0) { - final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); - if (keepCountOption != null) { - bootstrap.option(NioChannelOption.of(keepCountOption), TransportSettings.TCP_KEEP_COUNT.get(settings)); - } + bootstrap.option( + NioChannelOption.of(NetUtils.getTcpKeepCountSocketOption()), + TransportSettings.TCP_KEEP_COUNT.get(settings) + ); } } @@ -236,23 +232,16 @@ private void createServerBootstrap(ProfileSettings profileSettings, SharedGroupF if (profileSettings.tcpKeepAlive) { // Note that Netty logs a warning if it can't set the option if (profileSettings.tcpKeepIdle >= 0) { - final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); - if (keepIdleOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIdleOption), profileSettings.tcpKeepIdle); - } + serverBootstrap.childOption(NioChannelOption.of(NetUtils.getTcpKeepIdleSocketOption()), profileSettings.tcpKeepIdle); } if (profileSettings.tcpKeepInterval >= 0) { - final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); - if (keepIntervalOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIntervalOption), profileSettings.tcpKeepInterval); - } - + serverBootstrap.childOption( + NioChannelOption.of(NetUtils.getTcpKeepIntervalSocketOption()), + profileSettings.tcpKeepInterval + ); } if (profileSettings.tcpKeepCount >= 0) { - final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); - if (keepCountOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepCountOption), profileSettings.tcpKeepCount); - } + serverBootstrap.childOption(NioChannelOption.of(NetUtils.getTcpKeepCountSocketOption()), profileSettings.tcpKeepCount); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java index 6cea1296f2e7a..1a4e7b3fc1565 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java @@ -8,17 +8,38 @@ package org.elasticsearch.transport.netty4; -import org.apache.lucene.util.Constants; import org.elasticsearch.core.IOUtils; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.channels.NetworkChannel; +import java.nio.channels.SocketChannel; + +import static org.hamcrest.Matchers.hasItem; + public class NetUtilsTests extends ESTestCase { - public void testExtendedSocketOptions() { - assumeTrue("JDK possibly not supported", Constants.JVM_NAME.contains("HotSpot") || Constants.JVM_NAME.contains("OpenJDK")); + public void testExtendedSocketOptions() throws IOException { + 
assertTrue( + "jdk.net module not resolved", + ModuleLayer.boot().modules().stream().map(Module::getName).anyMatch(nm -> nm.equals("jdk.net")) + ); + assumeTrue("Platform possibly not supported", IOUtils.LINUX || IOUtils.MAC_OS_X); - assertNotNull(NetUtils.getTcpKeepIdleSocketOptionOrNull()); - assertNotNull(NetUtils.getTcpKeepIntervalSocketOptionOrNull()); - assertNotNull(NetUtils.getTcpKeepCountSocketOptionOrNull()); + try (var channel = networkChannel()) { + var options = channel.supportedOptions(); + assertThat(options, hasItem(NetUtils.getTcpKeepIdleSocketOption())); + assertThat(options, hasItem(NetUtils.getTcpKeepIntervalSocketOption())); + assertThat(options, hasItem(NetUtils.getTcpKeepCountSocketOption())); + } + } + + private static NetworkChannel networkChannel() { + try { + return SocketChannel.open(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 253758e378856..91a31f19f0e3b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -125,12 +125,12 @@ private void checkDefaultKeepAliveOptions(TcpChannel channel) throws IOException assertThat(nettyChannel.getNettyChannel(), instanceOf(Netty4NioSocketChannel.class)); Netty4NioSocketChannel netty4NioSocketChannel = (Netty4NioSocketChannel) nettyChannel.getNettyChannel(); SocketChannel socketChannel = netty4NioSocketChannel.javaChannel(); - assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIdleSocketOptionOrNull())); - Integer keepIdle = socketChannel.getOption(NetUtils.getTcpKeepIdleSocketOptionOrNull()); + assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIdleSocketOption())); + Integer keepIdle = socketChannel.getOption(NetUtils.getTcpKeepIdleSocketOption()); assertNotNull(keepIdle); assertThat(keepIdle, lessThanOrEqualTo(500)); - assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIntervalSocketOptionOrNull())); - Integer keepInterval = socketChannel.getOption(NetUtils.getTcpKeepIntervalSocketOptionOrNull()); + assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIntervalSocketOption())); + Integer keepInterval = socketChannel.getOption(NetUtils.getTcpKeepIntervalSocketOption()); assertNotNull(keepInterval); assertThat(keepInterval, lessThanOrEqualTo(500)); } From b4b68c87e99c4e51492be0fdef54462bf1074e13 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 29 Jul 2022 19:18:00 +0200 Subject: [PATCH 008/265] Bring back lost optimization to building metadata from a diff (#88950) This was lost again due to another merge conflict. 
--- .../main/java/org/elasticsearch/cluster/metadata/Metadata.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index dbbbba70ee067..3ea7056e0ef99 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1257,7 +1257,7 @@ public Metadata apply(Metadata part) { builder.templates(templates.apply(part.templates)); builder.customs(customs.apply(part.customs)); builder.put(reservedStateMetadata.apply(part.reservedStateMetadata)); - return builder.build(); + return builder.build(true); } } From e501609604351ce9d9eb9792ad0d2379db69e734 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 29 Jul 2022 11:28:31 -0700 Subject: [PATCH 009/265] Deprecate network plugins (#88924) Network plugins provide network implementations. In the past this has been used for alternatives to netty based networking, using the JDK's nio. However, nio has now been removed, and it is inadvisable for a plugin to implement this low level part of the system. Therefore, this commit marks the NetworkPlugin interface as deprecated. --- docs/changelog/88924.yaml | 10 ++++++ docs/reference/migration/migrate_8_5.asciidoc | 33 +++++++++++++++++++ .../elasticsearch/plugins/NetworkPlugin.java | 1 + 3 files changed, 44 insertions(+) create mode 100644 docs/changelog/88924.yaml diff --git a/docs/changelog/88924.yaml b/docs/changelog/88924.yaml new file mode 100644 index 0000000000000..f6eef41154cfd --- /dev/null +++ b/docs/changelog/88924.yaml @@ -0,0 +1,10 @@ +pr: 88924 +summary: Deprecate network plugins +area: Infra/Plugins +type: deprecation +issues: [] +deprecation: + title: Deprecate network plugins + area: Java API + details: Plugins extending NetworkPlugin are deprecated. + impact: Users should discontinue using plugins which extend NetworkPlugin. diff --git a/docs/reference/migration/migrate_8_5.asciidoc b/docs/reference/migration/migrate_8_5.asciidoc index 91404e7b18ec5..046b3701bf332 100644 --- a/docs/reference/migration/migrate_8_5.asciidoc +++ b/docs/reference/migration/migrate_8_5.asciidoc @@ -20,3 +20,36 @@ coming::[8.5.0] There are no breaking changes in {es} 8.5. // end::notable-breaking-changes[] +[discrete] +[[deprecated-8.5]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.5 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.5. + +To find out if you are using any deprecated functionality, +enable <>. + + +[discrete] +[[deprecations_85_network_plugins]] +==== Plugin API deprecations + +[[network_plugins_deprecated]] +Plugins that extend the NetworkPlugin interface are deprecated. +[%collapsible] +==== +*Details* + +Plugins may override funcionality that controls how nodes connect +with other nodes over TCP/IP. These plugins extend the NetworkPlugin +interface. In the next major release, these plugins will fail +to install. + +*Impact* + +Discontinue using any plugins which extend NetworkPlugin. You can +see if any plugins use deprecated functionality by checking +the Elasticsearch deprecation log. 
+==== diff --git a/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java b/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java index 80561167b6442..4a07d1809a03c 100644 --- a/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java @@ -30,6 +30,7 @@ /** * Plugin for extending network and transport related classes */ +@Deprecated public interface NetworkPlugin { /** From f092d90f00eb0496f0cee9b47d5d4d63c20cebd2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 29 Jul 2022 12:26:51 -0700 Subject: [PATCH 010/265] Replace Rocky Linux with RHEL in pull request packaging test job --- ...c+elasticsearch+pull-request+packaging-tests-unix-sample.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml index 1942bc53ded11..7d52ec346b2ed 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml @@ -31,7 +31,7 @@ type: label-expression name: os values: - - rocky-linux-8-packaging + - rhel-8-packaging - ubuntu-20.04-packaging - axis: type: user-defined From 41af3cb8e542e158e5cc252bce9062d7e676c632 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 29 Jul 2022 13:32:24 -0700 Subject: [PATCH 011/265] Ignore beats artifacts when resolving all artifact dependencies (#88960) --- distribution/docker/build.gradle | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index a3be272a09b0c..a1217e391a589 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -487,3 +487,8 @@ subprojects { Project subProject -> } } } + +tasks.named('resolveAllDependencies') { + // Don't try and resolve filebeat or metricbeat snapshots as they may not always be available + configs = configurations.matching { it.name.endsWith('beat') == false } +} From 713657f11806eff7394306673c7a650477f17e55 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 29 Jul 2022 22:37:09 +0200 Subject: [PATCH 012/265] Fix slow assertion running in production in RoutingNodes (#88951) This needs to be in a separate method, it's currently running in production and uses significant CPU time. Broken in #88794 --- .../java/org/elasticsearch/cluster/routing/RoutingNodes.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 0d5c421e2c384..14df43d0845dd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -146,7 +146,12 @@ private RoutingNodes(RoutingTable routingTable, DiscoveryNodes discoveryNodes, b } } } + assert invariant(); + } + + private boolean invariant() { nodesToShards.values().forEach(RoutingNode::invariant); + return true; } private RoutingNodes(RoutingNodes routingNodes) { From e3c6726a71a8958d061c074611f93d0e6e00634d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 29 Jul 2022 14:00:34 -0700 Subject: [PATCH 013/265] Deprecate overriding DiscoveryPlugin internals (#88925) DiscoveryPlugin allows extending getJoinValidator and getElectionStrategies. 
These are implementation details of the system. This commit deprecates these methods so that plugin authors are discouraged from overriding them. --- docs/changelog/88925.yaml | 12 +++++++++++ docs/reference/migration/migrate_8_5.asciidoc | 20 ++++++++++++++++++- .../plugins/DiscoveryPlugin.java | 2 ++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/88925.yaml diff --git a/docs/changelog/88925.yaml b/docs/changelog/88925.yaml new file mode 100644 index 0000000000000..3d410dace48ba --- /dev/null +++ b/docs/changelog/88925.yaml @@ -0,0 +1,12 @@ +pr: 88925 +summary: Deprecate overriding `DiscoveryPlugin` internals +area: Infra/Plugins +type: deprecation +issues: [] +deprecation: + title: Deprecate overriding `DiscoveryPlugin` internals + area: Java API + details: Plugins extending DiscoveryPlugin and overriding + join validators or election strategies are deprecated. + impact: Users should discontinue using plugins that override join + validators or election strategies in DiscoveryPlugin. diff --git a/docs/reference/migration/migrate_8_5.asciidoc b/docs/reference/migration/migrate_8_5.asciidoc index 046b3701bf332..a4ff5e9f6b20b 100644 --- a/docs/reference/migration/migrate_8_5.asciidoc +++ b/docs/reference/migration/migrate_8_5.asciidoc @@ -35,7 +35,7 @@ enable <>. [discrete] -[[deprecations_85_network_plugins]] +[[deprecations_85_plugins]] ==== Plugin API deprecations [[network_plugins_deprecated]] @@ -53,3 +53,21 @@ Discontinue using any plugins which extend NetworkPlugin. You can see if any plugins use deprecated functionality by checking the Elasticsearch deprecation log. ==== + +[[discoveryplugin_joinvalidator_and_election_strategies_deprecated]] +.Extending DiscoveryPlugin to override join validators or election strategies is deprecated +[%collapsible] +==== +*Details* + +Plugins that extend DiscoveryPlugin may override getJoinValidator and +getElectionStrategies. These methods are implementation details of the +clustering mechanism within Elasticsearch. They should not be overriden. +In the next major release, plugins overriding getJoinValidator or +getElectionStrategies will fail to install. + +*Impact* + +Discontinue using any plugins that override getJoinValidator or +getElectionStrategies in DiscoveryPlugin. You can see if any plugins +use deprecated functionality by checking the Elasticsearch deprecation log. +==== + diff --git a/server/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java b/server/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java index a15bb15f1c8de..7d3b1c9bfdd69 100644 --- a/server/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java @@ -76,6 +76,7 @@ default Map> getSeedHostProviders( * join attempt but might be called multiple times during the lifetime of a node. Validators are expected to throw a * {@link IllegalStateException} if the node and the cluster-state are incompatible. */ + @Deprecated default BiConsumer getJoinValidator() { return null; } @@ -83,6 +84,7 @@ default BiConsumer getJoinValidator() { /** * Allows plugging in election strategies (see {@link ElectionStrategy}) that define a customized notion of an election quorum. 
*/ + @Deprecated default Map getElectionStrategies() { return Collections.emptyMap(); } From c06fc60624c69ff2bd549f6c33d02eedf6274378 Mon Sep 17 00:00:00 2001 From: Nikola Grcevski <6207777+grcevski@users.noreply.github.com> Date: Fri, 29 Jul 2022 17:04:23 -0400 Subject: [PATCH 014/265] [TEST] Reliable settings.json update in FileSettingsIT (#88959) --- .../reservedstate/service/FileSettingsServiceIT.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index f4a9d2993d188..708d9226c8d4d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -25,6 +25,8 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -84,7 +86,10 @@ private void writeJSONFile(String node, String json) throws Exception { FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); Files.createDirectories(fileSettingsService.operatorSettingsDir()); - Files.write(fileSettingsService.operatorSettingsFile(), Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Path tempFilePath = createTempFile(); + + Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); } private CountDownLatch setupClusterStateListener(String node) { From bc9a93975e4f3d9ae3e391e28ecd2fb28b55d621 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 29 Jul 2022 14:30:04 -0700 Subject: [PATCH 015/265] Add deprecation message for deprecated plugin APIs (#88961) Plugin APIs are defined by a set of interfaces from server. Many of these APIs are actually implementation details of the system. As we move these implementation details to use different hook mechanisms so that internals are only implementable by builtin components, the existing plugin APIs need to be deprecated. Java provides a means to indicate deprecation - through the `@Deprecated` annotation. But that annotation is only seen when compiling a plugin implementing deprecated hooks, and only then if deprecation warnings are not disabled. This commit adds an introspection step to plugin initialization which inspects each loaded plugin and looks for any APIs marked with the @Deprecated annotation which are overridden by the plugin. If any are found, deprecation log messages are then emitted to the deprecation log. 
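To make the introspection step described above concrete, the following is a minimal, self-contained sketch of how overridden @Deprecated methods can be detected with core reflection. It is only an illustration of the general approach, not the PluginIntrospector code added by this patch; ExamplePluginApi, ExamplePlugin, and overriddenDeprecatedMethods are hypothetical names invented for the example and do not exist in Elasticsearch.

    import java.lang.reflect.Method;
    import java.lang.reflect.Modifier;
    import java.util.ArrayList;
    import java.util.List;

    class DeprecationIntrospectionSketch {

        // Hypothetical stand-ins for a deprecated plugin API and a plugin implementation;
        // they exist only to make the sketch runnable.
        interface ExamplePluginApi {
            @Deprecated
            default String joinValidator() { return "default"; }
            default String unrelated() { return "default"; }
        }

        static class ExamplePlugin implements ExamplePluginApi {
            @Override
            public String joinValidator() { return "overridden"; }
        }

        // Returns the names of @Deprecated methods declared on the given API interface
        // that the plugin class (or one of its superclasses) overrides.
        static List<String> overriddenDeprecatedMethods(Class<?> pluginClass, Class<?> api) {
            List<String> overridden = new ArrayList<>();
            for (Method apiMethod : api.getDeclaredMethods()) {
                if (Modifier.isStatic(apiMethod.getModifiers())
                        || apiMethod.isAnnotationPresent(Deprecated.class) == false) {
                    continue; // only instance methods marked @Deprecated are of interest
                }
                try {
                    Method resolved = pluginClass.getMethod(apiMethod.getName(), apiMethod.getParameterTypes());
                    // If the resolved method is declared somewhere other than the API interface,
                    // the plugin has provided its own implementation.
                    if (resolved.getDeclaringClass() != api) {
                        overridden.add(apiMethod.getName());
                    }
                } catch (NoSuchMethodException unexpected) {
                    throw new AssertionError(unexpected);
                }
            }
            return overridden;
        }

        public static void main(String[] args) {
            // Prints [joinValidator]; a real plugin service would emit a deprecation log entry instead.
            System.out.println(overriddenDeprecatedMethods(ExamplePlugin.class, ExamplePluginApi.class));
        }
    }

The key design point, which the sketch shares with the approach described above, is that no cooperation from the plugin is needed: the declaring class of each resolved method is compared against the API interface, so any override of a @Deprecated member can be reported at plugin load time.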
--- docs/changelog/88961.yaml | 5 ++ .../plugins/PluginIntrospector.java | 85 ++++++++++++++----- .../elasticsearch/plugins/PluginsService.java | 54 +++++++++++- .../plugins/PluginIntrospectorTests.java | 19 +++++ .../plugins/PluginsServiceTests.java | 50 +++++++++++ .../elasticsearch/plugins/PluginTestUtil.java | 20 +++++ 6 files changed, 206 insertions(+), 27 deletions(-) create mode 100644 docs/changelog/88961.yaml diff --git a/docs/changelog/88961.yaml b/docs/changelog/88961.yaml new file mode 100644 index 0000000000000..9d1460341675e --- /dev/null +++ b/docs/changelog/88961.yaml @@ -0,0 +1,5 @@ +pr: 88961 +summary: Add deprecation message for deprecated plugin APIs +area: Infra/Plugins +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java b/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java index d2b0c1d6f1176..92a58a809c59d 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java @@ -12,13 +12,15 @@ import java.lang.reflect.Method; import java.lang.reflect.Modifier; -import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; import java.util.stream.Stream; import static java.util.stream.Collectors.toMap; @@ -49,12 +51,22 @@ final class PluginIntrospector { SystemIndexPlugin.class ); - private record MethodType(String name, Class[] parameterTypes) {} + private final Set> deprecatedPluginClasses = pluginClasses.stream() + .filter(c -> c.isAnnotationPresent(Deprecated.class)) + .collect(Collectors.toUnmodifiableSet()); + + private record MethodType(String name, Class[] parameterTypes, boolean isDeprecated) {} private final Map, List> pluginMethodsMap; + private final Map, List> pluginDeprecatedMethodsMap; private PluginIntrospector() { pluginMethodsMap = pluginClasses.stream().collect(toMap(Function.identity(), PluginIntrospector::findMethods)); + pluginDeprecatedMethodsMap = pluginMethodsMap.entrySet() + .stream() + .map(e -> Map.entry(e.getKey(), e.getValue().stream().filter(MethodType::isDeprecated).toList())) + .filter(e -> e.getValue().isEmpty() == false) + .collect(toMap(Map.Entry::getKey, Map.Entry::getValue)); } static PluginIntrospector getInstance() { @@ -67,7 +79,7 @@ static PluginIntrospector getInstance() { */ List interfaces(final Class pluginClass) { assert Plugin.class.isAssignableFrom(pluginClass); - return interfaceClasses(pluginClass).map(Class::getSimpleName).sorted().toList(); + return interfaceClasses(pluginClass, pluginClasses::contains).map(Class::getSimpleName).sorted().toList(); } /** @@ -75,28 +87,59 @@ List interfaces(final Class pluginClass) { * contains the simple names of the methods. */ List overriddenMethods(final Class pluginClass) { + return findOverriddenMethods(pluginClass, pluginMethodsMap).keySet().stream().sorted().toList(); + } + + /** + * Returns the list of deprecated Elasticsearch plugin interfaces which are implemented by the + * given plugin implementation class. The list contains the simple names of the interfaces. 
+ */ + List deprecatedInterfaces(final Class pluginClass) { + assert Plugin.class.isAssignableFrom(pluginClass); + return interfaceClasses(pluginClass, deprecatedPluginClasses::contains).map(Class::getSimpleName).sorted().toList(); + } + + /** + * Returns the deprecated methods from Elasticsearch plugin interfaces which are implemented by + * the given plugin implementation class. The map is from the simple method name to the simple interface class name. + * + * @apiNote The simple names work as a key because they are unique across all plugin interfaces. + */ + Map deprecatedMethods(final Class pluginClass) { + return findOverriddenMethods(pluginClass, pluginDeprecatedMethodsMap); + } + + // finds the subset of given methods that are overridden by the given class + // returns a map of method name to interface name the method was declared in + private Map findOverriddenMethods(final Class pluginClass, Map, List> methodsMap) { assert Plugin.class.isAssignableFrom(pluginClass); - List> esPluginClasses = Stream.concat(Stream.of(Plugin.class), interfaceClasses(pluginClass)).toList(); + List> clazzes = Stream.concat(Stream.of(Plugin.class), interfaceClasses(pluginClass, methodsMap::containsKey)).toList(); + if (clazzes.isEmpty()) { + return Map.of(); + } - List overriddenMethods = new ArrayList<>(); - for (var esPluginClass : esPluginClasses) { - List esPluginMethods = pluginMethodsMap.get(esPluginClass); - assert esPluginMethods != null : "no plugin methods for " + esPluginClass; - for (var mt : esPluginMethods) { + Map overriddenMethods = new HashMap<>(); + for (var clazz : clazzes) { + List methods = methodsMap.get(clazz); + if (methods == null) { + continue; + } + for (var mt : methods) { try { Method m = pluginClass.getMethod(mt.name(), mt.parameterTypes()); - if (m.getDeclaringClass() == esPluginClass) { + if (m.getDeclaringClass() == clazz) { // it's not overridden } else { - assert esPluginClass.isAssignableFrom(m.getDeclaringClass()); - overriddenMethods.add(mt.name()); + assert clazz.isAssignableFrom(m.getDeclaringClass()); + var existing = overriddenMethods.put(mt.name(), clazz.getSimpleName()); + assert existing == null; } } catch (NoSuchMethodException unexpected) { throw new AssertionError(unexpected); } } } - return overriddenMethods.stream().sorted().toList(); + return Map.copyOf(overriddenMethods); } // Returns the non-static methods declared in the given class. @@ -106,28 +149,24 @@ private static List findMethods(Class cls) { assert cls.isInterface() || cls == Plugin.class : cls; return Arrays.stream(cls.getDeclaredMethods()) .filter(m -> Modifier.isStatic(m.getModifiers()) == false) - .map(m -> new MethodType(m.getName(), m.getParameterTypes())) + .map(m -> new MethodType(m.getName(), m.getParameterTypes(), m.isAnnotationPresent(Deprecated.class))) .toList(); } // Returns a stream of o.e.XXXPlugin interfaces, that the given plugin class implements. 
- private Stream> interfaceClasses(Class pluginClass) { + private static Stream> interfaceClasses(Class pluginClass, Predicate> classPredicate) { assert Plugin.class.isAssignableFrom(pluginClass); Set> pluginInterfaces = new HashSet<>(); do { - Arrays.stream(pluginClass.getInterfaces()).forEach(inf -> superInterfaces(inf, pluginInterfaces)); + Arrays.stream(pluginClass.getInterfaces()).forEach(inf -> superInterfaces(inf, pluginInterfaces, classPredicate)); } while ((pluginClass = pluginClass.getSuperclass()) != java.lang.Object.class); return pluginInterfaces.stream(); } - private void superInterfaces(Class c, Set> interfaces) { - if (isESPlugin(c)) { + private static void superInterfaces(Class c, Set> interfaces, Predicate> classPredicate) { + if (classPredicate.test(c)) { interfaces.add(c); } - Arrays.stream(c.getInterfaces()).forEach(inf -> superInterfaces(inf, interfaces)); - } - - private boolean isESPlugin(Class c) { - return pluginClasses.contains(c); + Arrays.stream(c.getInterfaces()).forEach(inf -> superInterfaces(inf, interfaces, classPredicate)); } } diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 7e2e13d5343f5..a15c86b86993d 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -16,6 +16,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -89,6 +91,7 @@ record LoadedPlugin(PluginDescriptor descriptor, Plugin instance, ClassLoader lo } private static final Logger logger = LogManager.getLogger(PluginsService.class); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(PluginsService.class); private final Settings settings; private final Path configPath; @@ -148,9 +151,13 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, } Map loadedPlugins = loadBundles(seenBundles); - this.info = new PluginsAndModules(getRuntimeInfos(pluginsList, loadedPlugins), modulesList); + + var inspector = PluginIntrospector.getInstance(); + this.info = new PluginsAndModules(getRuntimeInfos(inspector, pluginsList, loadedPlugins), modulesList); this.plugins = List.copyOf(loadedPlugins.values()); + checkDeprecations(inspector, pluginsList, loadedPlugins); + checkMandatoryPlugins( pluginsList.stream().map(PluginDescriptor::getName).collect(Collectors.toSet()), new HashSet<>(MANDATORY_SETTING.get(settings)) @@ -190,8 +197,11 @@ private static void logPluginInfo(final List pluginDescriptors } } - private static List getRuntimeInfos(List pluginDescriptors, Map plugins) { - var plugInspector = PluginIntrospector.getInstance(); + private static List getRuntimeInfos( + PluginIntrospector inspector, + List pluginDescriptors, + Map plugins + ) { var officialPlugins = getOfficialPlugins(); List runtimeInfos = new ArrayList<>(); for (PluginDescriptor descriptor : pluginDescriptors) { @@ -201,7 +211,7 @@ private static List getRuntimeInfos(List pl boolean isOfficial = officialPlugins.contains(descriptor.getName()); PluginApiInfo apiInfo = null; if 
(isOfficial == false) { - apiInfo = new PluginApiInfo(plugInspector.interfaces(pluginClazz), plugInspector.overriddenMethods(pluginClazz)); + apiInfo = new PluginApiInfo(inspector.interfaces(pluginClazz), inspector.overriddenMethods(pluginClazz)); } runtimeInfos.add(new PluginRuntimeInfo(descriptor, isOfficial, apiInfo)); } @@ -499,6 +509,42 @@ static LayerAndLoader createPlugin( } } + private static void checkDeprecations( + PluginIntrospector inspector, + List pluginDescriptors, + Map plugins + ) { + for (PluginDescriptor descriptor : pluginDescriptors) { + LoadedPlugin plugin = plugins.get(descriptor.getName()); + Class pluginClazz = plugin.instance.getClass(); + for (String deprecatedInterface : inspector.deprecatedInterfaces(pluginClazz)) { + deprecationLogger.warn( + DeprecationCategory.PLUGINS, + pluginClazz.getName() + deprecatedInterface, + "Plugin class {} from plugin {} implements deprecated plugin interface {}. " + + "This plugin interface will be removed in a future release.", + pluginClazz.getName(), + descriptor.getName(), + deprecatedInterface + ); + } + for (var deprecatedMethodInInterface : inspector.deprecatedMethods(pluginClazz).entrySet()) { + String methodName = deprecatedMethodInInterface.getKey(); + String interfaceName = deprecatedMethodInInterface.getValue(); + deprecationLogger.warn( + DeprecationCategory.PLUGINS, + pluginClazz.getName() + methodName + interfaceName, + "Plugin class {} from plugin {} implements deprecated method {} from plugin interface {}. " + + "This method will be removed in a future release.", + pluginClazz.getName(), + descriptor.getName(), + methodName, + interfaceName + ); + } + } + } + /** * Reloads all Lucene SPI implementations using the new classloader. * This method must be called after the new classloader has been created to diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java index 9f2816fb48028..30fe3c770efb6 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java @@ -9,8 +9,10 @@ package org.elasticsearch.plugins; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -45,11 +47,13 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Supplier; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasEntry; public class PluginIntrospectorTests extends ESTestCase { @@ -375,4 +379,19 @@ public void signalShutdown(List shutdownNodeIds) {} assertThat(pluginIntrospector.overriddenMethods(AbstractShutdownAwarePlugin.class), empty()); assertThat(pluginIntrospector.interfaces(AbstractShutdownAwarePlugin.class), contains("ShutdownAwarePlugin")); } + + public void testDeprecatedInterface() { + class DeprecatedPlugin extends Plugin implements NetworkPlugin {} + 
assertThat(pluginIntrospector.deprecatedInterfaces(DeprecatedPlugin.class), contains("NetworkPlugin")); + } + + public void testDeprecatedMethod() { + class JoinValidatorPlugin extends Plugin implements DiscoveryPlugin { + @Override + public BiConsumer getJoinValidator() { + return null; + } + } + assertThat(pluginIntrospector.deprecatedMethods(JoinValidatorPlugin.class), hasEntry("getJoinValidator", "DiscoveryPlugin")); + } } diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index baa56b0978bc1..4bf3c6f66eafa 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -711,6 +711,56 @@ public void testLoadServiceProvidersInSameClassLoader() { assertThat(providers, allOf(hasSize(1), everyItem(instanceOf(BarTestService.class)))); } + public void testDeprecatedPluginInterface() throws Exception { + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final Path plugins = home.resolve("plugins"); + final Path plugin = plugins.resolve("deprecated-plugin"); + Files.createDirectories(plugin); + PluginTestUtil.writeSimplePluginDescriptor(plugin, "deprecated-plugin", "p.DeprecatedPlugin"); + Path jar = plugin.resolve("impl.jar"); + JarUtils.createJarWithEntries(jar, Map.of("p/DeprecatedPlugin.class", InMemoryJavaCompiler.compile("p.DeprecatedPlugin", """ + package p; + import org.elasticsearch.plugins.*; + public class DeprecatedPlugin extends Plugin implements NetworkPlugin {} + """))); + + newPluginsService(settings); + assertWarnings( + "Plugin class p.DeprecatedPlugin from plugin deprecated-plugin implements " + + "deprecated plugin interface NetworkPlugin. " + + "This plugin interface will be removed in a future release." + ); + } + + public void testDeprecatedPluginMethod() throws Exception { + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final Path plugins = home.resolve("plugins"); + final Path plugin = plugins.resolve("deprecated-plugin"); + Files.createDirectories(plugin); + PluginTestUtil.writeSimplePluginDescriptor(plugin, "deprecated-plugin", "p.DeprecatedPlugin"); + Path jar = plugin.resolve("impl.jar"); + JarUtils.createJarWithEntries(jar, Map.of("p/DeprecatedPlugin.class", InMemoryJavaCompiler.compile("p.DeprecatedPlugin", """ + package p; + import java.util.Map; + import org.elasticsearch.plugins.*; + import org.elasticsearch.cluster.coordination.ElectionStrategy; + public class DeprecatedPlugin extends Plugin implements DiscoveryPlugin { + @Override + public Map getElectionStrategies() { + return Map.of(); + } + } + """))); + + newPluginsService(settings); + assertWarnings( + "Plugin class p.DeprecatedPlugin from plugin deprecated-plugin implements deprecated method " + + "getElectionStrategies from plugin interface DiscoveryPlugin. This method will be removed in a future release." 
+ ); + } + private static class TestExtensiblePlugin extends Plugin implements ExtensiblePlugin { private List extensions; diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java index 6d633e00a3e17..bd3e9dec5e84d 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java @@ -8,6 +8,8 @@ package org.elasticsearch.plugins; +import org.elasticsearch.Version; + import java.io.IOException; import java.io.OutputStream; import java.nio.file.Files; @@ -25,6 +27,24 @@ public static void writeStablePluginProperties(Path pluginDir, String... stringP writeProperties(pluginDir.resolve(PluginDescriptor.STABLE_DESCRIPTOR_FILENAME), stringProps); } + public static void writeSimplePluginDescriptor(Path pluginDir, String name, String classname) throws IOException { + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "description", + "name", + name, + "version", + "1.0.0", + "elasticsearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + classname + ); + } + /** convenience method to write a plugin properties file */ private static void writeProperties(Path propertiesFile, String... stringProps) throws IOException { assert stringProps.length % 2 == 0; From 18245ec9ef3637ffb6f5e3300d8de25dbe0b0331 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 29 Jul 2022 15:39:04 -0700 Subject: [PATCH 016/265] Fail eagerly when beats artifacts don't exist --- .ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml index c4050517d3918..5ffcfc25cde2f 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml @@ -22,8 +22,8 @@ export BEATS_DIR=$(pwd)/distribution/docker/build/artifacts/beats mkdir -p ${BEATS_DIR} - curl -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz - curl -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dbuild.snapshot=false \ -Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef build From 2c975eb9485bf50f86f3259a283d8bd35771f579 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Sat, 30 Jul 2022 09:38:22 +0100 Subject: [PATCH 017/265] Ensure that the FileSettingService closes the config file input stream (#88953) --- .../service/FileSettingsService.java | 16 ++++++++++------ 1 file changed, 10 
insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index a8141e8f711fa..7fcc6c8a5b9c1 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.env.Environment; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.BufferedInputStream; @@ -303,15 +302,20 @@ private WatchKey enableSettingsWatcher(WatchKey previousKey, Path settingsDir) t CountDownLatch processFileSettings(Path path, Consumer errorHandler) throws IOException { CountDownLatch waitForCompletion = new CountDownLatch(1); logger.info("processing path [{}] for [{}]", path, NAMESPACE); - try (var json = new BufferedInputStream(Files.newInputStream(path))) { - try (XContentParser parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) { - stateService.process(NAMESPACE, parser, (e) -> { + try ( + var fis = Files.newInputStream(path); + var bis = new BufferedInputStream(fis); + var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) + ) { + stateService.process(NAMESPACE, parser, (e) -> { + try { if (e != null) { errorHandler.accept(e); } + } finally { waitForCompletion.countDown(); - }); - } + } + }); } return waitForCompletion; From a0e55a6ed7cbfd1eedd6529c625ca7d07e1b297f Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Sun, 31 Jul 2022 11:52:52 +0100 Subject: [PATCH 018/265] Fix PluginsServiceTests on Windows (#88971) --- .../plugins/PluginsServiceTests.java | 54 ++++++++++++------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 4bf3c6f66eafa..afc1a5c5a101b 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.io.InputStream; +import java.io.UncheckedIOException; import java.lang.reflect.InvocationTargetException; import java.net.URL; import java.net.URLClassLoader; @@ -382,12 +383,7 @@ public void testPluginNameClash() throws IOException { } public void testExistingMandatoryInstalledPlugin() throws IOException { - // This test opens a child classloader, reading a jar under the test temp - // dir (a dummy plugin). Classloaders are closed by GC, so when test teardown - // occurs the jar is deleted while the classloader is still open. However, on - // windows, files cannot be deleted when they are still open by a process. 
- assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - final Path pathHome = createTempDir(); + final Path pathHome = createTempDir(getTestName()); final Path plugins = pathHome.resolve("plugins"); final Path fake = plugins.resolve("fake"); @@ -411,7 +407,8 @@ public void testExistingMandatoryInstalledPlugin() throws IOException { } final Settings settings = Settings.builder().put("path.home", pathHome).put("plugin.mandatory", "fake").build(); - newPluginsService(settings); + var pluginsService = newPluginsService(settings); + closePluginLoaders(pluginsService); } public void testPluginFromParentClassLoader() throws IOException { @@ -725,12 +722,16 @@ public void testDeprecatedPluginInterface() throws Exception { public class DeprecatedPlugin extends Plugin implements NetworkPlugin {} """))); - newPluginsService(settings); - assertWarnings( - "Plugin class p.DeprecatedPlugin from plugin deprecated-plugin implements " - + "deprecated plugin interface NetworkPlugin. " - + "This plugin interface will be removed in a future release." - ); + var pluginService = newPluginsService(settings); + try { + assertWarnings( + "Plugin class p.DeprecatedPlugin from plugin deprecated-plugin implements " + + "deprecated plugin interface NetworkPlugin. " + + "This plugin interface will be removed in a future release." + ); + } finally { + closePluginLoaders(pluginService); + } } public void testDeprecatedPluginMethod() throws Exception { @@ -754,11 +755,28 @@ public Map getElectionStrategies() { } """))); - newPluginsService(settings); - assertWarnings( - "Plugin class p.DeprecatedPlugin from plugin deprecated-plugin implements deprecated method " - + "getElectionStrategies from plugin interface DiscoveryPlugin. This method will be removed in a future release." - ); + var pluginService = newPluginsService(settings); + try { + assertWarnings( + "Plugin class p.DeprecatedPlugin from plugin deprecated-plugin implements deprecated method " + + "getElectionStrategies from plugin interface DiscoveryPlugin. This method will be removed in a future release." + ); + } finally { + closePluginLoaders(pluginService); + } + } + + // Closes the URLClassLoaders of plugins loaded by the given plugin service. 
+ static void closePluginLoaders(PluginsService pluginService) { + for (var lp : pluginService.plugins()) { + if (lp.loader()instanceof URLClassLoader urlClassLoader) { + try { + PrivilegedOperations.closeURLClassLoader(urlClassLoader); + } catch (IOException unexpected) { + throw new UncheckedIOException(unexpected); + } + } + } } private static class TestExtensiblePlugin extends Plugin implements ExtensiblePlugin { From ed564f6e1d00c95fc5ec3cd4550955a017c76822 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 1 Aug 2022 07:21:13 +0200 Subject: [PATCH 019/265] Update lo lucene-9.3.0 (#88927) --- build-tools-internal/version.properties | 2 +- .../licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 | 1 + .../lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 | 1 + .../lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 | 1 + .../lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 | 1 + ...lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 | 1 + .../lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 | 1 + ...lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 | 1 + .../lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.3.0.jar.sha1 | 1 + ...cene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 | 1 + .../lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.3.0.jar.sha1 | 1 + .../lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 | 1 + server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-core-9.3.0.jar.sha1 | 1 + .../lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-grouping-9.3.0.jar.sha1 | 1 + .../lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.3.0.jar.sha1 | 1 + server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-join-9.3.0.jar.sha1 | 1 + .../licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-memory-9.3.0.jar.sha1 | 1 + server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-misc-9.3.0.jar.sha1 | 1 + .../licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-queries-9.3.0.jar.sha1 | 1 + .../lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.3.0.jar.sha1 | 1 + .../licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.3.0.jar.sha1 | 1 + .../licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - 
server/licenses/lucene-suggest-9.3.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 | 1 - x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 | 1 + 49 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 delete mode 100644 modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 delete mode 100644 modules/legacy-geo/licenses/lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-core-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-join-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.3.0.jar.sha1 delete mode 100644 
server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.3.0.jar.sha1 delete mode 100644 x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 create mode 100644 x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index b16a7d4a7667e..52379cf3fb557 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.5.0 -lucene = 9.3.0-snapshot-b8d1fcfd0ec +lucene = 9.3.0 bundled_jdk_vendor = openjdk bundled_jdk = 18.0.2+9@f6ad4b4450fd4d298113270ec84f30ee diff --git a/modules/lang-expression/licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/lang-expression/licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 47b8df46111d4..0000000000000 --- a/modules/lang-expression/licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f9d91e4de3468b4c513a82a3d20d9d19137c4311 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..11661ba525168 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 @@ -0,0 +1 @@ +da4e2de2008a0e8c33da7177b85225604cb5200e \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 544a44a26debb..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b146dc1d898b3f638328a4d6a64f68cfede251ec \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2d216277b3a8e --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5583bcd3a24d3aae40b0a3152458021844ac09aa \ No newline at end of file diff --git a/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 5e9c50d838196..0000000000000 --- a/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f87c4435a856c612a5799fa89397364a7b2d6f7e \ No newline at end of file diff --git a/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 new file mode 100644 index 
0000000000000..8bbc5359487ff --- /dev/null +++ b/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 @@ -0,0 +1 @@ +c9b226b49ae987a4226791f023562187583eb9ad \ No newline at end of file diff --git a/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 6c74a9716f82f..0000000000000 --- a/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -110e8b2e5bced4b8f482ac58a2cf9cd64591b028 \ No newline at end of file diff --git a/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..31132ef0ad6df --- /dev/null +++ b/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 @@ -0,0 +1 @@ +201aa61856ae44fa494504591aed54fd9b75af16 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index ceea8ba4f6855..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -501aa4f0028424a994b06627f30ffb36150ffbe2 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..df4ae8d72dd2b --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 @@ -0,0 +1 @@ +11dd9be0448fe594cf918f5260e193b3ab4e07a0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 929be5cd0d86f..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4a84f37391ab5da0697ba6344555b633aa4bacd \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..675bf726d2a65 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 @@ -0,0 +1 @@ +87c1357612f2f483174d1a63ea8c6680a1696bac \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 60c85b324c183..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -012f177949d83aa7bdf26c309f5569f67d1c65b5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..8987f89c913df --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5d032dbeb3f4015741336a877dd4b0e62099246c \ No newline at end of file diff --git 
a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 782b48c8fd4df..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -792f50d6cd8b75c277c514f2f6e9914572942dfe \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..00d66c733c548 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 @@ -0,0 +1 @@ +fe6ac8772b545e0abd0c755cd4bd07caad58edb9 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 4b9ceb4a4581f..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1488267195c87749dcc42de6b2f665d24ff8d9e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..0c521b5f5ef6a --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 @@ -0,0 +1 @@ +288726e13b598c341e81aef8b5c9ce53f51889d0 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 45ccdbf538570..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c78968b087eaf2a95ed3b67540efc32455bab84d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..ba98dd7e06f71 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 @@ -0,0 +1 @@ +166d02f7f98f18c6607335030a404fcad8f57cd6 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 91d9d4c9452b2..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -978ee14dad7edab6384d04655ce1db219547b6d8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..88ac9a13e8ce3 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 @@ -0,0 +1 @@ +3c0e4177aa87a4be2826a360f656f3559ea3f997 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 
b/server/licenses/lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 6782780d6cbd4..0000000000000 --- a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a154a194ea505d27b538270ee2db2b5a4a38371 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2e260eb028f4c --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 @@ -0,0 +1 @@ +03496708a19a8a55a0dc4f61f8aa2febc6e8977c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 220d2b83dacd9..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ad36d2a32c1dda37040cdfed9dcdf294b8f3b7c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..1dda17ee92fdb --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 @@ -0,0 +1 @@ +95ea01ee0d1e543e18e3cf58d8a6a27a587a7239 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 1acc580cf4a7e..0000000000000 --- a/server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e41aa9fe38033e61da13fe420aa6e9400f467dd8 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0.jar.sha1 b/server/licenses/lucene-core-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..fd870008c5bd4 --- /dev/null +++ b/server/licenses/lucene-core-9.3.0.jar.sha1 @@ -0,0 +1 @@ +a030180999bc3f1a65f23f53b38098ca9daeee79 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 25f07a5af5a69..0000000000000 --- a/server/licenses/lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71cd063e306af5acf1cef0492eebbbf000e6a6ce \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0.jar.sha1 b/server/licenses/lucene-grouping-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..6f63ca177d3c3 --- /dev/null +++ b/server/licenses/lucene-grouping-9.3.0.jar.sha1 @@ -0,0 +1 @@ +883071196e53ec93d2a53dcc8211ee30be6c00dc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 263f6dd6b208e..0000000000000 --- a/server/licenses/lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adc913180fac1b221f57288661f069cb7a240127 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..78264d8ee3713 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 @@ -0,0 +1 @@ +7e895c49b9991ea2ec08855c425b9eae44a08764 \ No newline at end of file diff --git 
a/server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 638bf0f37a91f..0000000000000 --- a/server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3618de63e62d734ab1892ff446ae4f5ef866bee6 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0.jar.sha1 b/server/licenses/lucene-join-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..5e641f5f01075 --- /dev/null +++ b/server/licenses/lucene-join-9.3.0.jar.sha1 @@ -0,0 +1 @@ +04baaae4ce4a35ae919150dd17cd1e63b0da9d24 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 3ca4420b49396..0000000000000 --- a/server/licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -07e0de548fc392428545db40192280b4f83daf4f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0.jar.sha1 b/server/licenses/lucene-memory-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..c8e86c7674ede --- /dev/null +++ b/server/licenses/lucene-memory-9.3.0.jar.sha1 @@ -0,0 +1 @@ +1a2203b332edc1366b9789f5286296e109dbc8c4 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 09ecd52494738..0000000000000 --- a/server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab308291a7dd5ec9988a229dc8e7c27fc2bb5409 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0.jar.sha1 b/server/licenses/lucene-misc-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..11a459a9f52ba --- /dev/null +++ b/server/licenses/lucene-misc-9.3.0.jar.sha1 @@ -0,0 +1 @@ +61b502c9557247b6803a346c0bab20c9dc89d125 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 8d2959d64aac3..0000000000000 --- a/server/licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b5804be2c87d995c5255ff1ad739052fc243661 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0.jar.sha1 b/server/licenses/lucene-queries-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2b577bd33b46a --- /dev/null +++ b/server/licenses/lucene-queries-9.3.0.jar.sha1 @@ -0,0 +1 @@ +d8fe3bce3c05015c5fdb78279f36b9f1a75b98d8 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 0be8f71b787a2..0000000000000 --- a/server/licenses/lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a55d8a68cccaaf4af5a973c4332519d3eb477068 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..b106860bf9f3e --- /dev/null +++ b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 @@ -0,0 +1 @@ +78f259a66d48f77a2d2b96a0a858efa08eba72dc \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 
b/server/licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index da0e369ccba29..0000000000000 --- a/server/licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6005b6b9b09b1da1c3c74558693824f429e55d \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..82c2c6d85ca4c --- /dev/null +++ b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5ee318cf8e9a70c2c99e03e157465316a3d4a17a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 38a6a7ca5e787..0000000000000 --- a/server/licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c5418c469be74cc5df3427ac07386598c18882 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0.jar.sha1 b/server/licenses/lucene-suggest-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..71a263aa163f8 --- /dev/null +++ b/server/licenses/lucene-suggest-9.3.0.jar.sha1 @@ -0,0 +1 @@ +fb5d7243ba67616edbda1ecf421c615dd595752d \ No newline at end of file diff --git a/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index ceea8ba4f6855..0000000000000 --- a/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -501aa4f0028424a994b06627f30ffb36150ffbe2 \ No newline at end of file diff --git a/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 b/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..df4ae8d72dd2b --- /dev/null +++ b/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 @@ -0,0 +1 @@ +11dd9be0448fe594cf918f5260e193b3ab4e07a0 \ No newline at end of file From aa7d0a6cf8f51794ea2d0130083a61eb7fd3e450 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 1 Aug 2022 08:27:41 +0200 Subject: [PATCH 020/265] Improve EQL Sequence circuit breaker precision (#88538) Fixes #88300 --- docs/changelog/88538.yaml | 6 + .../execution/sequence/SequenceMatcher.java | 39 ++-- .../sequence/CircuitBreakerTests.java | 216 ++++++++++-------- 3 files changed, 141 insertions(+), 120 deletions(-) create mode 100644 docs/changelog/88538.yaml diff --git a/docs/changelog/88538.yaml b/docs/changelog/88538.yaml new file mode 100644 index 0000000000000..1d0498e59c3d0 --- /dev/null +++ b/docs/changelog/88538.yaml @@ -0,0 +1,6 @@ +pr: 88538 +summary: Improve EQL Sequence circuit breaker precision +area: EQL +type: bug +issues: + - 88300 diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java index 550c65da64d3a..8510f8a2debd0 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java @@ -82,7 +82,10 @@ public void clear() { private final Stats stats = new Stats(); private boolean headLimit = false; - private long totalRamBytesUsed = 0; + + // circuit breaker accounting + private long 
prevRamBytesUsedInFlight = 0; + private long prevRamBytesUsedCompleted = 0; @SuppressWarnings("rawtypes") public SequenceMatcher(int stages, boolean descending, TimeValue maxSpan, Limit limit, CircuitBreaker circuitBreaker) { @@ -114,9 +117,6 @@ private void trackSequence(Sequence sequence) { * Returns false if the process needs to be stopped. */ boolean match(int stage, Iterable> hits) { - long ramBytesUsedInFlight = ramBytesUsedInFlight(); - long ramBytesUsedCompleted = ramBytesUsedCompleted(); - for (Tuple tuple : hits) { KeyAndOrdinal ko = tuple.v1(); HitReference hit = tuple.v2(); @@ -145,7 +145,7 @@ boolean match(int stage, Iterable> hits) { log.trace("{}", stats); matched = true; } - trackMemory(ramBytesUsedInFlight, ramBytesUsedCompleted); + trackMemory(); return matched; } @@ -305,22 +305,20 @@ public void clear() { clearCircuitBreaker(); } - private long ramBytesUsedInFlight() { + // protected for testing purposes + protected long ramBytesUsedInFlight() { return RamUsageEstimator.sizeOf(keyToSequences) + RamUsageEstimator.sizeOf(stageToKeys); } - private long ramBytesUsedCompleted() { + // protected for testing purposes + protected long ramBytesUsedCompleted() { return RamUsageEstimator.sizeOfCollection(completed); } - private void addMemory(long bytes, String label) { - totalRamBytesUsed += bytes; - circuitBreaker.addEstimateBytesAndMaybeBreak(bytes, label); - } - private void clearCircuitBreaker() { - circuitBreaker.addWithoutBreaking(-totalRamBytesUsed); - totalRamBytesUsed = 0; + circuitBreaker.addWithoutBreaking(-prevRamBytesUsedInFlight - prevRamBytesUsedCompleted); + prevRamBytesUsedInFlight = 0; + prevRamBytesUsedCompleted = 0; } // The method is called at the end of match() which is called for every sub query in the sequence query @@ -328,11 +326,14 @@ private void clearCircuitBreaker() { // expensive, so we just calculate the difference in bytes of the total memory that the matcher's // structure occupy for the in-flight tracking of sequences, as well as for the list of completed // sequences. 
- private void trackMemory(long prevRamBytesUsedInflight, long prevRamBytesUsedCompleted) { - long bytesDiff = ramBytesUsedInFlight() - prevRamBytesUsedInflight; - addMemory(bytesDiff, CB_INFLIGHT_LABEL); - bytesDiff = ramBytesUsedCompleted() - prevRamBytesUsedCompleted; - addMemory(bytesDiff, CB_COMPLETED_LABEL); + private void trackMemory() { + long newRamBytesUsedInFlight = ramBytesUsedInFlight(); + circuitBreaker.addEstimateBytesAndMaybeBreak(newRamBytesUsedInFlight - prevRamBytesUsedInFlight, CB_INFLIGHT_LABEL); + prevRamBytesUsedInFlight = newRamBytesUsedInFlight; + + long newRamBytesUsedCompleted = ramBytesUsedCompleted(); + circuitBreaker.addEstimateBytesAndMaybeBreak(newRamBytesUsedCompleted - prevRamBytesUsedCompleted, CB_COMPLETED_LABEL); + prevRamBytesUsedCompleted = newRamBytesUsedCompleted; } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index ef56f5c160604..7787f3e6ef171 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -132,27 +132,7 @@ public void fetchHits(Iterable> refs, ActionListener> criteria = new ArrayList<>(stages); - - for (int i = 0; i < stages; i++) { - final int j = i; - criteria.add( - new Criterion<>( - i, - new BoxedQueryRequest( - () -> SearchSourceBuilder.searchSource().size(10).query(matchAllQuery()).terminateAfter(j), - "@timestamp", - emptyList(), - emptySet() - ), - keyExtractors, - tsExtractor, - null, - implicitTbExtractor, - false - ) - ); - } + List> criteria = buildCriteria(stages); SequenceMatcher matcher = new SequenceMatcher(stages, false, TimeValue.MINUS_ONE, null, CIRCUIT_BREAKER); TumblingWindow window = new TumblingWindow(client, criteria, null, matcher); @@ -187,8 +167,10 @@ public void testCircuitBreakerSequenceMatcher() { assertEquals("sequence_inflight", e.getMessage()); // Break on second iteration - SequenceMatcher matcher2 = new SequenceMatcher(stages, false, TimeValue.MINUS_ONE, null, new EqlTestCircuitBreaker(15000)); + EqlTestCircuitBreaker breaker = new EqlTestCircuitBreaker(15000); + SequenceMatcher matcher2 = new SequenceMatcher(stages, false, TimeValue.MINUS_ONE, null, breaker); matcher2.match(0, hits); + assertEquals(matcher2.ramBytesUsedInFlight() + matcher2.ramBytesUsedCompleted(), breaker.ramBytesUsed); e = expectThrows(CircuitBreakingException.class, () -> matcher2.match(0, hits)); assertEquals("sequence_inflight", e.getMessage()); @@ -210,92 +192,18 @@ public void testMemoryClearedOnShardsException() { } private void assertMemoryCleared(int sequenceFiltersCount, BiFunction esClientSupplier) { - final int SEARCH_REQUESTS_EXPECTED_COUNT = 2; - List eqlBreakerSettings = Collections.singletonList( - new BreakerSettings( - CIRCUIT_BREAKER_NAME, - CIRCUIT_BREAKER_LIMIT, - CIRCUIT_BREAKER_OVERHEAD, - CircuitBreaker.Type.MEMORY, - CircuitBreaker.Durability.TRANSIENT - ) - ); + final int searchRequestsExpectedCount = 2; try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( Settings.EMPTY, - eqlBreakerSettings, + breakerSettings(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); - ESMockClient esClient = esClientSupplier.apply(service.getBreaker(CIRCUIT_BREAKER_NAME), SEARCH_REQUESTS_EXPECTED_COUNT); + ESMockClient 
esClient = esClientSupplier.apply(service.getBreaker(CIRCUIT_BREAKER_NAME), searchRequestsExpectedCount); ) { CircuitBreaker eqlCircuitBreaker = service.getBreaker(CIRCUIT_BREAKER_NAME); - EqlConfiguration eqlConfiguration = new EqlConfiguration( - new String[] { "test" }, - org.elasticsearch.xpack.ql.util.DateUtils.UTC, - "nobody", - "cluster", - null, - emptyMap(), - null, - TimeValue.timeValueSeconds(30), - null, - 123, - "", - new TaskId("test", 123), - new EqlSearchTask( - randomLong(), - "transport", - EqlSearchAction.NAME, - "", - null, - emptyMap(), - emptyMap(), - new AsyncExecutionId("", new TaskId(randomAlphaOfLength(10), 1)), - TimeValue.timeValueDays(5) - ), - x -> Collections.emptySet() - ); - IndexResolver indexResolver = new IndexResolver( - esClient, - "cluster", - DefaultDataTypeRegistry.INSTANCE, - () -> { return emptySet(); } - ); - EqlSession eqlSession = new EqlSession( - esClient, - eqlConfiguration, - indexResolver, - new PreAnalyzer(), - new PostAnalyzer(), - new EqlFunctionRegistry(), - new Verifier(new Metrics()), - new Optimizer(), - new Planner(), - eqlCircuitBreaker - ); - QueryClient eqlClient = new PITAwareQueryClient(eqlSession); - List> criteria = new ArrayList<>(sequenceFiltersCount); - - for (int i = 0; i < sequenceFiltersCount; i++) { - final int j = i; - criteria.add( - new Criterion<>( - i, - new BoxedQueryRequest( - () -> SearchSourceBuilder.searchSource().size(10).query(matchAllQuery()).terminateAfter(j), - "@timestamp", - emptyList(), - emptySet() - ), - keyExtractors, - tsExtractor, - null, - implicitTbExtractor, - false - ) - ); - } - + QueryClient eqlClient = buildQueryClient(esClient, eqlCircuitBreaker); + List> criteria = buildCriteria(sequenceFiltersCount); SequenceMatcher matcher = new SequenceMatcher(sequenceFiltersCount, false, TimeValue.MINUS_ONE, null, eqlCircuitBreaker); TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher); window.execute(wrap(p -> {}, ex -> {})); @@ -306,6 +214,112 @@ private void assertMemoryCleared(int sequenceFiltersCount, BiFunction> criteria = buildCriteria(sequenceFiltersCount); + + SequenceMatcher matcher = new SequenceMatcher(sequenceFiltersCount, false, TimeValue.MINUS_ONE, null, eqlCircuitBreaker); + TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher); + window.execute(wrap(p -> fail(), ex -> assertTrue(ex instanceof CircuitBreakingException))); + } + } + + private List breakerSettings() { + List eqlBreakerSettings = Collections.singletonList( + new BreakerSettings( + CIRCUIT_BREAKER_NAME, + CIRCUIT_BREAKER_LIMIT, + CIRCUIT_BREAKER_OVERHEAD, + CircuitBreaker.Type.MEMORY, + CircuitBreaker.Durability.TRANSIENT + ) + ); + return eqlBreakerSettings; + } + + private List> buildCriteria(int sequenceFiltersCount) { + List> criteria = new ArrayList<>(sequenceFiltersCount); + for (int i = 0; i < sequenceFiltersCount; i++) { + final int j = i; + criteria.add( + new Criterion<>( + i, + new BoxedQueryRequest( + () -> SearchSourceBuilder.searchSource().size(10).query(matchAllQuery()).terminateAfter(j), + "@timestamp", + emptyList(), + emptySet() + ), + keyExtractors, + tsExtractor, + null, + implicitTbExtractor, + false + ) + ); + } + return criteria; + } + + private QueryClient buildQueryClient(ESMockClient esClient, CircuitBreaker eqlCircuitBreaker) { + EqlConfiguration eqlConfiguration = new EqlConfiguration( + new String[] { "test" }, + org.elasticsearch.xpack.ql.util.DateUtils.UTC, + "nobody", + "cluster", + null, + emptyMap(), + null, + 
TimeValue.timeValueSeconds(30), + null, + 123, + "", + new TaskId("test", 123), + new EqlSearchTask( + randomLong(), + "transport", + EqlSearchAction.NAME, + "", + null, + emptyMap(), + emptyMap(), + new AsyncExecutionId("", new TaskId(randomAlphaOfLength(10), 1)), + TimeValue.timeValueDays(5) + ), + x -> Collections.emptySet() + ); + IndexResolver indexResolver = new IndexResolver(esClient, "cluster", DefaultDataTypeRegistry.INSTANCE, Collections::emptySet); + EqlSession eqlSession = new EqlSession( + esClient, + eqlConfiguration, + indexResolver, + new PreAnalyzer(), + new PostAnalyzer(), + new EqlFunctionRegistry(), + new Verifier(new Metrics()), + new Optimizer(), + new Planner(), + eqlCircuitBreaker + ); + return new PITAwareQueryClient(eqlSession); + } + /** * A type of internal Node client that deals with three types of requests: open PIT, close PIT and SearchRequest. * This class is used by {@code CircuitBreakerTests#testMemoryClearedOnSuccessfulRequest()} and From 668ff6a07e3cf4cc7e16cde68909229212aac70b Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 1 Aug 2022 09:23:09 +0200 Subject: [PATCH 021/265] Simplify allocation service (#88850) This pr simplifies allocation service by extracting common code to a method. --- .../routing/allocation/AllocationService.java | 70 +++++-------------- 1 file changed, 17 insertions(+), 53 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 41093785b0c8d..f48bea901ffac 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -127,15 +127,7 @@ public ClusterState applyStartedShards(ClusterState clusterState, List(startedShards); startedShards.sort(Comparator.comparing(ShardRouting::primary)); @@ -205,23 +197,16 @@ public ClusterState applyFailedShards( } ClusterState tmpState = IndexMetadataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards, logger); - RoutingNodes routingNodes = getMutableRoutingNodes(tmpState); long currentNanoTime = currentNanoTime(); - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - routingNodes, - tmpState, - clusterInfoService.getClusterInfo(), - snapshotsInfoService.snapshotShardSizes(), - currentNanoTime - ); + RoutingAllocation allocation = createRoutingAllocation(tmpState, currentNanoTime); for (FailedShard failedShardEntry : failedShards) { ShardRouting shardToFail = failedShardEntry.routingEntry(); IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardToFail.shardId().getIndex()); allocation.addIgnoreShardForNode(shardToFail.shardId(), shardToFail.currentNodeId()); // failing a primary also fails initializing replica shards, re-resolve ShardRouting - ShardRouting failedShard = routingNodes.getByAllocationId(shardToFail.shardId(), shardToFail.allocationId().getId()); + ShardRouting failedShard = allocation.routingNodes() + .getByAllocationId(shardToFail.shardId(), shardToFail.allocationId().getId()); if (failedShard != null) { if (failedShard != shardToFail) { logger.trace( @@ -257,7 +242,7 @@ public ClusterState applyFailedShards( allocation.removeAllocationId(failedShard); } logger.warn(() -> "failing shard [" + failedShardEntry + "]", failedShardEntry.failure()); - routingNodes.failShard(logger, failedShard, unassignedInfo, indexMetadata, 
allocation.changes()); + allocation.routingNodes().failShard(logger, failedShard, unassignedInfo, indexMetadata, allocation.changes()); } else { logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail); } @@ -280,15 +265,7 @@ public ClusterState applyFailedShards( * if needed. */ public ClusterState disassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { - RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - routingNodes, - clusterState, - clusterInfoService.getClusterInfo(), - snapshotsInfoService.snapshotShardSizes(), - currentNanoTime() - ); + RoutingAllocation allocation = createRoutingAllocation(clusterState, currentNanoTime()); // first, clear from the shards any node id they used to belong to that is now dead disassociateDeadNodes(allocation); @@ -439,18 +416,10 @@ public static String firstListElementsToCommaDelimitedString( } public CommandsResult reroute(final ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) { - RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // we don't shuffle the unassigned shards here, to try and get as close as possible to // a consistent result of the effect the commands have on the routing // this allows systems to dry run the commands, see the resulting cluster state, and act on it - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - routingNodes, - clusterState, - clusterInfoService.getClusterInfo(), - snapshotsInfoService.snapshotShardSizes(), - currentNanoTime() - ); + RoutingAllocation allocation = createRoutingAllocation(clusterState, currentNanoTime()); // don't short circuit deciders, we want a full explanation allocation.debugDecision(true); // we ignore disable allocation, because commands are explicit @@ -481,16 +450,7 @@ public CommandsResult reroute(final ClusterState clusterState, AllocationCommand */ public ClusterState reroute(ClusterState clusterState, String reason) { ClusterState fixedClusterState = adaptAutoExpandReplicas(clusterState); - - RoutingNodes routingNodes = getMutableRoutingNodes(fixedClusterState); - RoutingAllocation allocation = new RoutingAllocation( - allocationDeciders, - routingNodes, - fixedClusterState, - clusterInfoService.getClusterInfo(), - snapshotsInfoService.snapshotShardSizes(), - currentNanoTime() - ); + RoutingAllocation allocation = createRoutingAllocation(fixedClusterState, currentNanoTime()); reroute(allocation); if (fixedClusterState == clusterState && allocation.routingNodesChanged() == false) { return clusterState; @@ -655,11 +615,15 @@ private static void applyStartedShards(RoutingAllocation routingAllocation, List } } - /** - * Create a mutable {@link RoutingNodes}. This is a costly operation so this must only be called once! 
- */ - private static RoutingNodes getMutableRoutingNodes(ClusterState clusterState) { - return clusterState.mutableRoutingNodes(); + private RoutingAllocation createRoutingAllocation(ClusterState clusterState, long currentNanoTime) { + return new RoutingAllocation( + allocationDeciders, + clusterState.mutableRoutingNodes(), + clusterState, + clusterInfoService.getClusterInfo(), + snapshotsInfoService.snapshotShardSizes(), + currentNanoTime + ); } /** override this to control time based decisions during allocation */ From 0f6965f32942ebc01d872578237faf3a1e5be39a Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 1 Aug 2022 09:51:20 +0200 Subject: [PATCH 022/265] Remove test only code from AllocationService (#88889) --- .../routing/allocation/AllocationService.java | 12 ------ .../cluster/reroute/ClusterRerouteTests.java | 2 +- .../cluster/routing/UnassignedInfoTests.java | 4 +- .../allocation/FailedShardsRoutingTests.java | 39 ++++++++++--------- .../allocation/InSyncAllocationIdTests.java | 2 +- .../MaxRetryAllocationDeciderTests.java | 14 +++---- .../RetryFailedAllocationTests.java | 2 +- .../SingleShardNoReplicasRoutingTests.java | 7 ++-- .../TrackFailedAllocationNodesTests.java | 13 +++++-- .../decider/FilterAllocationDeciderTests.java | 13 ++++++- 10 files changed, 57 insertions(+), 51 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index f48bea901ffac..7d97d1ea651ea 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -55,8 +55,6 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.health.ClusterShardHealth.getInactivePrimaryHealth; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; @@ -168,16 +166,6 @@ private static ClusterState buildResultAndLogHealthChange(ClusterState oldState, return newState; } - // Used for testing - public ClusterState applyFailedShard(ClusterState clusterState, ShardRouting failedShard, boolean markAsStale) { - return applyFailedShards(clusterState, singletonList(new FailedShard(failedShard, null, null, markAsStale)), emptyList()); - } - - // Used for testing - public ClusterState applyFailedShards(ClusterState clusterState, List failedShards) { - return applyFailedShards(clusterState, failedShards, emptyList()); - } - /** * Applies the failed shards. Note, only assigned ShardRouting instances that exist in the routing table should be * provided as parameter. 
Also applies a list of allocation ids to remove from the in-sync set for shard copies for which there diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java index 30fc73b6d861e..627052aeea62b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java @@ -122,7 +122,7 @@ public void onFailure(Exception e) { randomBoolean() ) ); - newState = allocationService.applyFailedShards(clusterState, failedShards); + newState = allocationService.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = clusterState.routingTable(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 9455e7fb62bae..68ae4f555b4c2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -39,6 +39,7 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -384,7 +385,8 @@ public void testFailedShard() { ShardRouting shardToFail = shardsWithState(clusterState.getRoutingNodes(), STARTED).get(0); clusterState = allocation.applyFailedShards( clusterState, - Collections.singletonList(new FailedShard(shardToFail, "test fail", null, randomBoolean())) + Collections.singletonList(new FailedShard(shardToFail, "test fail", null, randomBoolean())), + List.of() ); // verify the reason and details assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 0c058812b328e..deb93b0f49c56 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -29,6 +29,7 @@ import java.util.ArrayList; import java.util.HashSet; +import java.util.List; import java.util.Set; import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING; @@ -115,11 +116,7 @@ public void testFailedShardPrimaryRelocatingToAndFrom() { assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING)); logger.info("--> fail primary shard recovering instance on node3 being initialized"); - clusterState = allocation.applyFailedShard( - clusterState, - clusterState.getRoutingNodes().node("node3").iterator().next(), - randomBoolean() - ); + clusterState = applyFailedShard(allocation, clusterState, clusterState.getRoutingNodes().node("node3").iterator().next()); assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED)); assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0)); @@ -144,11 +141,7 @@ public void testFailedShardPrimaryRelocatingToAndFrom() { 
assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING)); logger.info("--> fail primary shard recovering instance on node1 being relocated"); - clusterState = allocation.applyFailedShard( - clusterState, - clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next(), - randomBoolean() - ); + clusterState = applyFailedShard(allocation, clusterState, clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next()); // check promotion of replica to primary assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED)); @@ -232,7 +225,7 @@ public void testFailPrimaryStartedCheckReplicaElected() { logger.info("fail the primary shard, will have no place to be rerouted to (single node), so stays unassigned"); ShardRouting shardToFail = clusterState.routingTable().index("test").shard(0).primaryShard(); - newState = strategy.applyFailedShard(clusterState, shardToFail, randomBoolean()); + newState = applyFailedShard(strategy, clusterState, shardToFail); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -290,7 +283,7 @@ public void testFirstAllocationFailureSingleNode() { logger.info("fail the first shard, will have no place to be rerouted to (single node), so stays unassigned"); ShardRouting firstShard = clusterState.getRoutingNodes().node("node1").iterator().next(); - newState = strategy.applyFailedShard(clusterState, firstShard, randomBoolean()); + newState = applyFailedShard(strategy, clusterState, firstShard); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -352,7 +345,7 @@ public void testSingleShardMultipleAllocationFailures() { } } - clusterState = strategy.applyFailedShards(clusterState, failedShards); + clusterState = strategy.applyFailedShards(clusterState, failedShards, List.of()); routingNodes = clusterState.getRoutingNodes(); for (FailedShard failedShard : failedShards) { if (routingNodes.getByAllocationId( @@ -412,7 +405,7 @@ public void testFirstAllocationFailureTwoNodes() { logger.info("fail the first shard, will start INITIALIZING on the second node"); final ShardRouting firstShard = clusterState.getRoutingNodes().node(nodeHoldingPrimary).iterator().next(); - newState = strategy.applyFailedShard(clusterState, firstShard, randomBoolean()); + newState = applyFailedShard(strategy, clusterState, firstShard); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -517,7 +510,7 @@ public void testRebalanceFailure() { logger.info("Fail the shards on node 3"); ShardRouting shardToFail = routingNodes.node("node3").iterator().next(); - newState = strategy.applyFailedShard(clusterState, shardToFail, randomBoolean()); + newState = applyFailedShard(strategy, clusterState, shardToFail); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingNodes = clusterState.getRoutingNodes(); @@ -572,7 +565,7 @@ public void testFailAllReplicasInitializingOnPrimaryFail() { // fail the primary shard, check replicas get removed as well... 
ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard(); - ClusterState newState = allocation.applyFailedShard(clusterState, primaryShardToFail, randomBoolean()); + ClusterState newState = applyFailedShard(allocation, clusterState, primaryShardToFail); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; // the primary gets allocated on another node, replicas are initializing @@ -621,7 +614,7 @@ public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToEle // fail the primary shard, check one replica gets elected to primary, others become INITIALIZING (from it) ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard(); - ClusterState newState = allocation.applyFailedShard(clusterState, primaryShardToFail, randomBoolean()); + ClusterState newState = applyFailedShard(allocation, clusterState, primaryShardToFail); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(1)); @@ -704,7 +697,7 @@ public void testReplicaOnNewestVersionIsPromoted() { // fail the primary shard again and make sure the correct replica is promoted ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard(); - ClusterState newState = allocation.applyFailedShard(clusterState, primaryShardToFail, randomBoolean()); + ClusterState newState = applyFailedShard(allocation, clusterState, primaryShardToFail); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; // the primary gets allocated on another node @@ -735,7 +728,7 @@ public void testReplicaOnNewestVersionIsPromoted() { // fail the primary shard again, and ensure the same thing happens ShardRouting secondPrimaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard(); - newState = allocation.applyFailedShard(clusterState, secondPrimaryShardToFail, randomBoolean()); + newState = applyFailedShard(allocation, clusterState, secondPrimaryShardToFail); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; // the primary gets allocated on another node @@ -762,4 +755,12 @@ public void testReplicaOnNewestVersionIsPromoted() { ); } } + + private ClusterState applyFailedShard(AllocationService allocationService, ClusterState clusterState, ShardRouting failedShard) { + return allocationService.applyFailedShards( + clusterState, + List.of(new FailedShard(failedShard, null, null, randomBoolean())), + List.of() + ); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java index cccc9eb1663ed..8004d42c44c25 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/InSyncAllocationIdTests.java @@ -130,7 +130,7 @@ public void testInSyncAllocationIdsUpdated() { logger.info("fail primary shard"); ShardRouting startedPrimary = shardsWithState(clusterState.getRoutingNodes(), STARTED).get(0); - clusterState = allocation.applyFailedShard(clusterState, startedPrimary, true); + clusterState = allocation.applyFailedShards(clusterState, List.of(new FailedShard(startedPrimary, null, null, true)), List.of()); assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(0)); 
assertEquals( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 7bbc1d16839ab..a6f21bebe820f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -94,7 +94,7 @@ public void testSingleRetryOnIgnore() { randomBoolean() ) ); - ClusterState newState = strategy.applyFailedShards(clusterState, failedShards); + ClusterState newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -107,7 +107,7 @@ public void testSingleRetryOnIgnore() { List failedShards = Collections.singletonList( new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), randomBoolean()) ); - ClusterState newState = strategy.applyFailedShards(clusterState, failedShards); + ClusterState newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -134,7 +134,7 @@ public void testSingleRetryOnIgnore() { new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), randomBoolean()) ); - newState = strategy.applyFailedShards(clusterState, failedShards); + newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -148,7 +148,7 @@ public void testSingleRetryOnIgnore() { failedShards = Collections.singletonList( new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), randomBoolean()) ); - newState = strategy.applyFailedShards(clusterState, failedShards); + newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -172,7 +172,7 @@ public void testFailedAllocation() { randomBoolean() ) ); - ClusterState newState = strategy.applyFailedShards(clusterState, failedShards); + ClusterState newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -196,7 +196,7 @@ public void testFailedAllocation() { List failedShards = Collections.singletonList( new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), randomBoolean()) ); - ClusterState newState = strategy.applyFailedShards(clusterState, failedShards); + ClusterState newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -268,7 +268,7 @@ public void testFailedAllocation() { List failedShards = Collections.singletonList( new FailedShard(routingTable.index("idx").shard(0).shard(0), "ZOOOMG", new UnsupportedOperationException(), randomBoolean()) ); - newState = strategy.applyFailedShards(clusterState, failedShards); + newState = 
strategy.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java index 8087d1c51b500..d63fecfec64dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java @@ -69,7 +69,7 @@ public void testRetryFailedResetForAllocationCommands() { List failedShards = Collections.singletonList( new FailedShard(getReplica(), "failing-shard::attempt-" + i, new ElasticsearchException("simulated"), randomBoolean()) ); - clusterState = strategy.applyFailedShards(clusterState, failedShards); + clusterState = strategy.applyFailedShards(clusterState, failedShards, List.of()); clusterState = strategy.reroute(clusterState, "allocation retry attempt-" + i); } assertThat("replica should not be assigned", getReplica().state(), equalTo(ShardRoutingState.UNASSIGNED)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index 09d4f23b3d75c..0fb0c6727ff2f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import java.util.HashSet; +import java.util.List; import java.util.Set; import static org.elasticsearch.cluster.routing.RoutingNodesHelper.shardsWithState; @@ -187,10 +188,10 @@ public void testSingleIndexShardFailed() { logger.info("Marking the shard as failed"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyFailedShard( + newState = strategy.applyFailedShards( clusterState, - routingNodes.node("node1").shardsWithState(INITIALIZING).get(0), - randomBoolean() + List.of(new FailedShard(routingNodes.node("node1").shardsWithState(INITIALIZING).get(0), null, null, randomBoolean())), + List.of() ); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TrackFailedAllocationNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TrackFailedAllocationNodesTests.java index b4ddcd9e698f6..e30a7c2f5141b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TrackFailedAllocationNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TrackFailedAllocationNodesTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import java.util.HashSet; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.empty; @@ -50,10 +51,10 @@ public void testTrackFailedNodes() { // track the failed nodes if shard is not started for (int i = 0; i < maxRetries; i++) { failedNodeIds.add(clusterState.routingTable().index("idx").shard(0).shard(0).currentNodeId()); - clusterState = allocationService.applyFailedShard( + clusterState = allocationService.applyFailedShards( clusterState, - 
clusterState.routingTable().index("idx").shard(0).shard(0), - randomBoolean() + List.of(new FailedShard(clusterState.routingTable().index("idx").shard(0).shard(0), null, null, randomBoolean())), + List.of() ); assertThat( clusterState.routingTable().index("idx").shard(0).shard(0).unassignedInfo().getFailedNodeIds(), @@ -69,7 +70,11 @@ public void testTrackFailedNodes() { // do not track the failed nodes while shard is started clusterState = startInitializingShardsAndReroute(allocationService, clusterState); assertThat(clusterState.routingTable().index("idx").shard(0).shard(0).state(), equalTo(ShardRoutingState.STARTED)); - clusterState = allocationService.applyFailedShard(clusterState, clusterState.routingTable().index("idx").shard(0).shard(0), false); + clusterState = allocationService.applyFailedShards( + clusterState, + List.of(new FailedShard(clusterState.routingTable().index("idx").shard(0).shard(0), null, null, false)), + List.of() + ); assertThat(clusterState.routingTable().index("idx").shard(0).shard(0).unassignedInfo().getFailedNodeIds(), empty()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index 6377322e1b40a..489f07b63981d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; @@ -66,7 +67,11 @@ public void testFilterInitialRecovery() { // we can initially only allocate on node2 assertEquals(routingTable.index("idx").shard(0).shard(0).state(), INITIALIZING); assertEquals(routingTable.index("idx").shard(0).shard(0).currentNodeId(), "node2"); - routingTable = service.applyFailedShard(state, routingTable.index("idx").shard(0).shard(0), randomBoolean()).routingTable(); + routingTable = service.applyFailedShards( + state, + List.of(new FailedShard(routingTable.index("idx").shard(0).shard(0), null, null, randomBoolean())), + List.of() + ).routingTable(); state = ClusterState.builder(state).routingTable(routingTable).build(); assertEquals(routingTable.index("idx").shard(0).shard(0).state(), UNASSIGNED); assertNull(routingTable.index("idx").shard(0).shard(0).currentNodeId()); @@ -122,7 +127,11 @@ public void testFilterInitialRecovery() { true, "test" ); - state = service.applyFailedShard(state, routingTable.index("idx").shard(0).primaryShard(), randomBoolean()); + state = service.applyFailedShards( + state, + List.of(new FailedShard(routingTable.index("idx").shard(0).primaryShard(), null, null, randomBoolean())), + List.of() + ); // now bring back node1 and see it's assigned state = service.reroute(ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).add(node1)).build(), "test"); From 516f2fbe1523b46d15b2cdd49549c6a82cc0c754 Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> 
Date: Mon, 1 Aug 2022 12:00:24 +0300 Subject: [PATCH 023/265] Remove `getTestName()` from the index name (#88941) This PR removes the test name from the source index name and replaces it with a 14 char long random alphanumeric. In the past, we had chosen to include the test name in the index name so that there are no naming conflicts between indexes of different tests. However, some times test names include invalid characters that make index creation fail. --- .../xpack/rollup/v2/RollupActionSingleNodeTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java index 85439a83e2371..6e4f7dbf19433 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java @@ -143,7 +143,7 @@ protected Collection> getPlugins() { @Before public void setup() { - sourceIndex = getTestName().toLowerCase(Locale.ROOT) + "-" + randomAlphaOfLength(4).toLowerCase(Locale.ROOT); + sourceIndex = randomAlphaOfLength(14).toLowerCase(Locale.ROOT); rollupIndex = "rollup-" + sourceIndex; startTime = randomLongBetween(946769284000L, 1607470084000L); // random date between 2000-2020 docCount = randomIntBetween(10, 9000); From d2868b00bf490f1a8159d989ab108d9e873f8592 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Mon, 1 Aug 2022 12:53:03 +0200 Subject: [PATCH 024/265] Support bulk updates of API keys (#88856) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds a new API route to support bulk updates of API keys: `POST _security/api_key/_bulk_update` The route takes a list of IDs (`ids`) of API keys to update, along with the same request parameters as the single operation route: - `role_descriptors` - The list of role descriptors specified for the key. This is one of the two parts that determines an API key’s privileges. - `metadata_flattened` - The searchable metadata associated to an API key Analogously to the single operation route, a call to `_bulk_update` automatically updates the `limited_by_role_descriptors`, `creator`, and `version` fields for each API key. The implementation ports the single API key update operation to use the new bulk functionality under the hood, translating as necessary at the transport layer. 
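As an illustration only, here is a minimal sketch of building the new transport-level request with the classes added in this change. The key IDs, role name, and metadata values below are made up, and leaving `role_descriptors` or `metadata` unset is assumed to mean "leave that field unchanged", matching the single-key route:

```java
import java.util.List;
import java.util.Map;

import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest;
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;

class BulkUpdateApiKeyRequestSketch {

    // Target two existing API keys by ID; role descriptors and metadata stay null,
    // which is assumed to leave both fields as they are for each key.
    static BulkUpdateApiKeyRequest idsOnly() {
        return BulkUpdateApiKeyRequest.usingApiKeyIds("key-id-1", "key-id-2");
    }

    // Same two keys, but also assigning a hypothetical role descriptor and metadata,
    // which the update applies to every key listed in `ids`.
    static BulkUpdateApiKeyRequest withUpdates() {
        return new BulkUpdateApiKeyRequest(
            List.of("key-id-1", "key-id-2"),
            List.of(new RoleDescriptor("monitoring-only", new String[] { "monitor" }, null, null)),
            Map.of("environment", "production")
        );
    }
}
```

The new REST handler (`RestBulkUpdateApiKeyAction`, added below) wires this request up to the `POST _security/api_key/_bulk_update` route described above.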
Relates: #88758 --- docs/changelog/88856.yaml | 5 + .../org/elasticsearch/test/ESTestCase.java | 4 + .../apikey/BaseUpdateApiKeyRequest.java | 74 ++++ .../action/apikey/BulkUpdateApiKeyAction.java | 20 + .../apikey/BulkUpdateApiKeyRequest.java | 68 ++++ .../apikey/BulkUpdateApiKeyResponse.java | 125 ++++++ .../action/apikey/UpdateApiKeyRequest.java | 64 +-- .../ManageOwnApiKeyClusterPrivilege.java | 5 +- .../apikey/BulkUpdateApiKeyRequestTests.java | 113 ++++++ .../apikey/BulkUpdateApiKeyResponseTests.java | 129 ++++++ .../apikey/UpdateApiKeyRequestTests.java | 2 +- .../ManageOwnApiKeyClusterPrivilegeTests.java | 13 + .../xpack/security/operator/Constants.java | 1 + .../xpack/security/apikey/ApiKeyRestIT.java | 153 ++++++- .../security/authc/ApiKeyIntegTests.java | 383 ++++++++++++++---- .../xpack/security/Security.java | 5 + .../TransportBaseUpdateApiKeyAction.java | 76 ++++ .../TransportBulkUpdateApiKeyAction.java | 64 +++ .../apikey/TransportUpdateApiKeyAction.java | 76 ++-- .../xpack/security/authc/ApiKeyService.java | 240 ++++++----- .../apikey/RestBulkUpdateApiKeyAction.java | 68 ++++ .../test/TestSecurityClient.java | 11 + .../security/authc/ApiKeyServiceTests.java | 30 +- .../apikey/RestUpdateApiKeyActionTests.java | 5 +- 24 files changed, 1446 insertions(+), 288 deletions(-) create mode 100644 docs/changelog/88856.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponseTests.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBulkUpdateApiKeyAction.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java diff --git a/docs/changelog/88856.yaml b/docs/changelog/88856.yaml new file mode 100644 index 0000000000000..49db9671bc501 --- /dev/null +++ b/docs/changelog/88856.yaml @@ -0,0 +1,5 @@ +pr: 88856 +summary: Support bulk updates of API keys +area: Security +type: feature +issues: [] diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index a15b67d0409ed..dff2509032460 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1268,6 +1268,10 @@ public static List randomSubsetOf(int size, Collection collection) { return tempList.subList(0, size); } + public static List shuffledList(List list) { + return randomSubsetOf(list.size(), list); + } + /** * Builds a set of unique items. 
Usually you'll get the requested count but you might get less than that number if the supplier returns * lots of repeats. Make sure that the items properly implement equals and hashcode. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java new file mode 100644 index 0000000000000..696893f0f41ef --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.core.security.action.role.RoleDescriptorRequestValidator; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public abstract class BaseUpdateApiKeyRequest extends ActionRequest { + + @Nullable + protected final List roleDescriptors; + @Nullable + protected final Map metadata; + + public BaseUpdateApiKeyRequest(@Nullable final List roleDescriptors, @Nullable final Map metadata) { + this.roleDescriptors = roleDescriptors; + this.metadata = metadata; + } + + public BaseUpdateApiKeyRequest(StreamInput in) throws IOException { + super(in); + this.roleDescriptors = in.readOptionalList(RoleDescriptor::new); + this.metadata = in.readMap(); + } + + public Map getMetadata() { + return metadata; + } + + public List getRoleDescriptors() { + return roleDescriptors; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (metadata != null && MetadataUtils.containsReservedMetadata(metadata)) { + validationException = addValidationError( + "API key metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", + validationException + ); + } + if (roleDescriptors != null) { + for (RoleDescriptor roleDescriptor : roleDescriptors) { + validationException = RoleDescriptorRequestValidator.validate(roleDescriptor, validationException); + } + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalCollection(roleDescriptors); + out.writeGenericMap(metadata); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyAction.java new file mode 100644 index 0000000000000..7bda9772cd0da --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyAction.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.action.ActionType; + +public final class BulkUpdateApiKeyAction extends ActionType { + + public static final String NAME = "cluster:admin/xpack/security/api_key/bulk_update"; + public static final BulkUpdateApiKeyAction INSTANCE = new BulkUpdateApiKeyAction(); + + private BulkUpdateApiKeyAction() { + super(NAME, BulkUpdateApiKeyResponse::new); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java new file mode 100644 index 0000000000000..59461265d6a5b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public final class BulkUpdateApiKeyRequest extends BaseUpdateApiKeyRequest { + + public static BulkUpdateApiKeyRequest usingApiKeyIds(String... 
ids) { + return new BulkUpdateApiKeyRequest(Arrays.stream(ids).toList(), null, null); + } + + public static BulkUpdateApiKeyRequest wrap(final UpdateApiKeyRequest request) { + return new BulkUpdateApiKeyRequest(List.of(request.getId()), request.getRoleDescriptors(), request.getMetadata()); + } + + private final List ids; + + public BulkUpdateApiKeyRequest( + final List ids, + @Nullable final List roleDescriptors, + @Nullable final Map metadata + ) { + super(roleDescriptors, metadata); + this.ids = Objects.requireNonNull(ids, "API key IDs must not be null"); + } + + public BulkUpdateApiKeyRequest(StreamInput in) throws IOException { + super(in); + this.ids = in.readStringList(); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (ids.isEmpty()) { + validationException = addValidationError("Field [ids] cannot be empty", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringCollection(ids); + } + + public List getIds() { + return ids; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java new file mode 100644 index 0000000000000..7ea0e1fcba4a4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public final class BulkUpdateApiKeyResponse extends ActionResponse implements ToXContentObject, Writeable { + + private final List updated; + private final List noops; + private final Map errorDetails; + + public BulkUpdateApiKeyResponse(final List updated, final List noops, final Map errorDetails) { + this.updated = updated; + this.noops = noops; + this.errorDetails = errorDetails; + } + + public BulkUpdateApiKeyResponse(StreamInput in) throws IOException { + super(in); + this.updated = in.readStringList(); + this.noops = in.readStringList(); + this.errorDetails = in.readMap(StreamInput::readString, StreamInput::readException); + } + + public List getUpdated() { + return updated; + } + + public List getNoops() { + return noops; + } + + public Map getErrorDetails() { + return errorDetails; + } + + public int getTotalResultCount() { + return updated.size() + noops.size() + errorDetails.size(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().stringListField("updated", updated).stringListField("noops", noops); + if (errorDetails.isEmpty() == false) { + builder.startObject("errors"); + { + builder.field("count", errorDetails.size()); + builder.startObject("details"); + for (Map.Entry idWithException : errorDetails.entrySet()) { + builder.startObject(idWithException.getKey()); + ElasticsearchException.generateThrowableXContent(builder, params, idWithException.getValue()); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringCollection(updated); + out.writeStringCollection(noops); + out.writeMap(errorDetails, StreamOutput::writeString, StreamOutput::writeException); + } + + @Override + public String toString() { + return "BulkUpdateApiKeyResponse{" + "updated=" + updated + ", noops=" + noops + ", errorDetails=" + errorDetails + '}'; + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private final List updated; + private final List noops; + private final Map errorDetails; + + public Builder() { + updated = new ArrayList<>(); + noops = new ArrayList<>(); + errorDetails = new HashMap<>(); + } + + public Builder updated(final String id) { + updated.add(id); + return this; + } + + public Builder noop(final String id) { + noops.add(id); + return this; + } + + public Builder error(final String id, final Exception ex) { + errorDetails.put(id, ex); + return this; + } + + public BulkUpdateApiKeyResponse build() { + return new BulkUpdateApiKeyResponse(updated, noops, errorDetails); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java index 
e1cf2f939725b..688349dca3cf3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java @@ -7,98 +7,44 @@ package org.elasticsearch.xpack.core.security.action.apikey; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xpack.core.security.action.role.RoleDescriptorRequestValidator; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.support.MetadataUtils; import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.action.ValidateActions.addValidationError; - -public final class UpdateApiKeyRequest extends ActionRequest { +public final class UpdateApiKeyRequest extends BaseUpdateApiKeyRequest { + public static UpdateApiKeyRequest usingApiKeyId(final String id) { + return new UpdateApiKeyRequest(id, null, null); + } private final String id; - @Nullable - private final Map metadata; - @Nullable - private final List roleDescriptors; public UpdateApiKeyRequest( final String id, @Nullable final List roleDescriptors, @Nullable final Map metadata ) { + super(roleDescriptors, metadata); this.id = Objects.requireNonNull(id, "API key ID must not be null"); - this.roleDescriptors = roleDescriptors; - this.metadata = metadata; } public UpdateApiKeyRequest(StreamInput in) throws IOException { super(in); - this.roleDescriptors = in.readOptionalList(RoleDescriptor::new); - this.metadata = in.readMap(); this.id = in.readString(); } - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (metadata != null && MetadataUtils.containsReservedMetadata(metadata)) { - validationException = addValidationError( - "API key metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", - validationException - ); - } - if (roleDescriptors != null) { - for (RoleDescriptor roleDescriptor : roleDescriptors) { - validationException = RoleDescriptorRequestValidator.validate(roleDescriptor, validationException); - } - } - return validationException; - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalCollection(roleDescriptors); - out.writeGenericMap(metadata); out.writeString(id); } - public static UpdateApiKeyRequest usingApiKeyId(String id) { - return new UpdateApiKeyRequest(id, null, null); - } - public String getId() { return id; } - - public Map getMetadata() { - return metadata; - } - - public List getRoleDescriptors() { - return roleDescriptors; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - UpdateApiKeyRequest that = (UpdateApiKeyRequest) o; - return id.equals(that.id) && Objects.equals(metadata, that.metadata) && Objects.equals(roleDescriptors, that.roleDescriptors); - } - - @Override - public int hashCode() { - return Objects.hash(id, metadata, roleDescriptors); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java index a473a328caf6b..c80d6b8ff15c6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; @@ -62,10 +63,10 @@ private ManageOwnClusterPermissionCheck() { protected boolean extendedCheck(String action, TransportRequest request, Authentication authentication) { if (request instanceof CreateApiKeyRequest) { return true; - } else if (request instanceof UpdateApiKeyRequest) { + } else if (request instanceof UpdateApiKeyRequest || request instanceof BulkUpdateApiKeyRequest) { // Note: we return `true` here even if the authenticated entity is an API key. API keys *cannot* update themselves, // however this is a business logic restriction, rather than one driven solely by privileges. We therefore enforce this - // limitation at the transport layer, in `TransportUpdateApiKeyAction`. + // limitation at the transport layer, in `TransportBaseUpdateApiKeyAction`. // Ownership of an API key, for regular users, is enforced at the service layer. return true; } else if (request instanceof final GetApiKeyRequest getApiKeyRequest) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java new file mode 100644 index 0000000000000..7a24729b7aef5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.containsStringIgnoringCase; +import static org.hamcrest.Matchers.equalTo; + +public class BulkUpdateApiKeyRequestTests extends ESTestCase { + + public void testSerialization() throws IOException { + final boolean roleDescriptorsPresent = randomBoolean(); + final List descriptorList; + if (roleDescriptorsPresent == false) { + descriptorList = null; + } else { + final int numDescriptors = randomIntBetween(0, 4); + descriptorList = new ArrayList<>(); + for (int i = 0; i < numDescriptors; i++) { + descriptorList.add(new RoleDescriptor("role_" + i, new String[] { "all" }, null, null)); + } + } + + final List ids = randomList(1, 5, () -> randomAlphaOfLength(10)); + final Map metadata = ApiKeyTests.randomMetadata(); + final var request = new BulkUpdateApiKeyRequest(ids, descriptorList, metadata); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + final var serialized = new BulkUpdateApiKeyRequest(in); + assertEquals(ids, serialized.getIds()); + assertEquals(descriptorList, serialized.getRoleDescriptors()); + assertEquals(metadata, request.getMetadata()); + } + } + } + + public void testNullValuesValidForNonIds() { + final var request = BulkUpdateApiKeyRequest.usingApiKeyIds("id"); + assertNull(request.validate()); + } + + public void testEmptyIdsNotValid() { + final var request = new BulkUpdateApiKeyRequest(List.of(), null, null); + final ActionRequestValidationException ve = request.validate(); + assertNotNull(ve); + assertThat(ve.validationErrors().size(), equalTo(1)); + assertThat(ve.validationErrors().get(0), containsString("Field [ids] cannot be empty")); + } + + public void testMetadataKeyValidation() { + final var reservedKey = "_" + randomAlphaOfLengthBetween(0, 10); + final var metadataValue = randomAlphaOfLengthBetween(1, 10); + final var request = new BulkUpdateApiKeyRequest( + randomList(1, 5, () -> randomAlphaOfLength(10)), + null, + Map.of(reservedKey, metadataValue) + ); + final ActionRequestValidationException ve = request.validate(); + assertNotNull(ve); + assertThat(ve.validationErrors().size(), equalTo(1)); + assertThat(ve.validationErrors().get(0), containsString("API key metadata keys may not start with [_]")); + } + + public void testRoleDescriptorValidation() { + final var request = new BulkUpdateApiKeyRequest( + randomList(1, 5, () -> randomAlphaOfLength(10)), + List.of( + new RoleDescriptor( + randomAlphaOfLength(5), + new String[] { "manage_index_template" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("rad").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application(randomFrom("app*tab", "app 1")) + .privileges(randomFrom(" ", "\n")) + .resources("resource") + .build() }, + null, + null, + Map.of("_key", "value"), + null + ) + ), + null + ); + final ActionRequestValidationException ve 
= request.validate(); + assertNotNull(ve); + assertThat(ve.validationErrors().get(0), containsString("unknown cluster privilege")); + assertThat(ve.validationErrors().get(1), containsString("unknown index privilege")); + assertThat(ve.validationErrors().get(2), containsStringIgnoringCase("application name")); + assertThat(ve.validationErrors().get(3), containsStringIgnoringCase("Application privilege names")); + assertThat(ve.validationErrors().get(4), containsStringIgnoringCase("role descriptor metadata keys may not start with ")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponseTests.java new file mode 100644 index 0000000000000..2b77da8dd015f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponseTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class BulkUpdateApiKeyResponseTests extends ESTestCase { + + public void testSerialization() throws IOException { + final boolean includeErrors = randomBoolean(); + final var response = new BulkUpdateApiKeyResponse( + List.of("api-key-id-1"), + List.of("api-key-id-2", "api-key-id-3"), + includeErrors + ? 
Map.of( + "failed-api-key-id-1", + new IllegalArgumentException("error1"), + "failed-api-key-id-2", + new ElasticsearchException("error2") + ) + : Map.of() + ); + try (BytesStreamOutput output = new BytesStreamOutput()) { + response.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + final var serialized = new BulkUpdateApiKeyResponse(input); + assertThat(serialized.getUpdated(), equalTo(response.getUpdated())); + assertThat(serialized.getNoops(), equalTo(response.getNoops())); + assertThat(serialized.getErrorDetails().size(), equalTo(response.getErrorDetails().size())); + if (includeErrors) { + assertThat(serialized.getErrorDetails().get("failed-api-key-id-1").toString(), containsString("error1")); + assertThat(serialized.getErrorDetails().get("failed-api-key-id-2").toString(), containsString("error2")); + } + } + } + } + + public void testToXContent() throws IOException { + // Force ordered key set for deterministic comparison with raw JSON string below + final SortedMap errorDetails = new TreeMap<>(); + errorDetails.put("failed-api-key-id-1", new IllegalArgumentException("msg - 1")); + errorDetails.put("failed-api-key-id-2", new ResourceNotFoundException("potato")); + errorDetails.put("failed-api-key-id-3", new ElasticsearchException("error1", new IllegalArgumentException("msg - 1"))); + errorDetails.put("failed-api-key-id-4", new ElasticsearchException("error2", new IllegalArgumentException("msg - 2"))); + final var response = new BulkUpdateApiKeyResponse(List.of("api-key-id-1"), List.of("api-key-id-2", "api-key-id-3"), errorDetails); + final XContentBuilder builder = XContentFactory.jsonBuilder(); + + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + assertThat(Strings.toString(builder), equalTo(XContentHelper.stripWhitespace(""" + { + "updated": [ + "api-key-id-1" + ], + "noops": [ + "api-key-id-2", + "api-key-id-3" + ], + "errors": { + "count": 4, + "details": { + "failed-api-key-id-1": { + "type": "illegal_argument_exception", + "reason": "msg - 1" + }, + "failed-api-key-id-2": { + "type": "resource_not_found_exception", + "reason": "potato" + }, + "failed-api-key-id-3": { + "type": "exception", + "reason": "error1", + "caused_by": { + "type": "illegal_argument_exception", + "reason": "msg - 1" + } + }, + "failed-api-key-id-4": { + "type": "exception", + "reason": "error2", + "caused_by": { + "type": "illegal_argument_exception", + "reason": "msg - 2" + } + } + } + } + }"""))); + } + + public void testToXContentOmitsErrorsSectionIfNoErrors() throws IOException { + final var response = new BulkUpdateApiKeyResponse(List.of("api-key-id-1"), List.of("api-key-id-2", "api-key-id-3"), Map.of()); + final XContentBuilder builder = XContentFactory.jsonBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(Strings.toString(builder), equalTo(XContentHelper.stripWhitespace(""" + { + "updated": [ + "api-key-id-1" + ], + "noops": [ + "api-key-id-2", + "api-key-id-3" + ] + }"""))); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java index 37a0a56039da6..65ff6efce6054 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java @@ -24,7 +24,7 @@ public class 
UpdateApiKeyRequestTests extends ESTestCase { - public void testNullValuesValid() { + public void testNullValuesValidForNonIds() { final var request = new UpdateApiKeyRequest("id", null, null); assertNull(request.validate()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java index af3b81dbec024..32f3cee5fb644 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -27,6 +28,8 @@ import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.user.User; +import java.util.List; + import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; @@ -56,6 +59,16 @@ public void testAuthenticationForUpdateApiKeyAllowsAll() { assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/update", updateApiKeyRequest, authentication)); } + public void testAuthenticationForBulkUpdateApiKeyAllowsAll() { + final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) + .build(); + final List apiKeyIds = randomList(1, 5, () -> randomAlphaOfLengthBetween(4, 7)); + final Authentication authentication = AuthenticationTestHelper.builder().build(); + final TransportRequest bulkUpdateApiKeyRequest = new BulkUpdateApiKeyRequest(apiKeyIds, null, null); + + assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/update", bulkUpdateApiKeyRequest, authentication)); + } + public void testAuthenticationWithApiKeyDeniesAccessToApiKeyActionsWhenItIsNotOwner() { final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) .build(); diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index b39b1a143a980..753f8c5cd716b 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -179,6 +179,7 @@ public class Constants { "cluster:admin/xpack/security/api_key/invalidate", "cluster:admin/xpack/security/api_key/query", "cluster:admin/xpack/security/api_key/update", + "cluster:admin/xpack/security/api_key/bulk_update", "cluster:admin/xpack/security/cache/clear", "cluster:admin/xpack/security/delegate_pki", "cluster:admin/xpack/security/enroll/node", diff --git 
a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 516d3b3d7a3a2..a23bfdd60f87e 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -32,11 +32,14 @@ import java.util.Set; import static org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField.RUN_AS_USER_HEADER; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -218,6 +221,51 @@ public void testUpdateApiKey() throws IOException { doTestUpdateApiKey(apiKeyName, apiKeyId, apiKeyEncoded, apiKeyMetadata); } + @SuppressWarnings({ "unchecked" }) + public void testBulkUpdateApiKey() throws IOException { + final EncodedApiKey apiKeyExpectingUpdate = createApiKey("my-api-key-name-1", Map.of("not", "returned")); + final EncodedApiKey apiKeyExpectingNoop = createApiKey("my-api-key-name-2", Map.of("not", "returned (changed)", "foo", "bar")); + final Map metadataForInvalidatedKey = Map.of("will not be updated", true); + final EncodedApiKey invalidatedApiKey = createApiKey("my-api-key-name-3", metadataForInvalidatedKey); + getSecurityClient().invalidateApiKeys(invalidatedApiKey.id); + final var notFoundApiKeyId = "not-found-api-key-id"; + final List idsToUpdate = shuffledList( + List.of(apiKeyExpectingUpdate.id, apiKeyExpectingNoop.id, notFoundApiKeyId, invalidatedApiKey.id) + ); + final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); + final Map expectedApiKeyMetadata = Map.of("not", "returned (changed)", "foo", "bar"); + final Map updateApiKeyRequestBody = Map.of("ids", idsToUpdate, "metadata", expectedApiKeyMetadata); + bulkUpdateApiKeyRequest.setJsonEntity( + XContentTestUtils.convertToXContent(updateApiKeyRequestBody, XContentType.JSON).utf8ToString() + ); + + final Response bulkUpdateApiKeyResponse = performRequestUsingRandomAuthMethod(bulkUpdateApiKeyRequest); + + assertOK(bulkUpdateApiKeyResponse); + final Map response = responseAsMap(bulkUpdateApiKeyResponse); + assertEquals(List.of(apiKeyExpectingUpdate.id()), response.get("updated")); + assertEquals(List.of(apiKeyExpectingNoop.id()), response.get("noops")); + final Map errors = (Map) response.get("errors"); + assertEquals(2, errors.get("count")); + final Map> errorDetails = (Map>) errors.get("details"); + assertEquals(2, errorDetails.size()); + expectErrorFields( + "resource_not_found_exception", + "no API key owned by requesting user found for ID [" + notFoundApiKeyId + "]", + errorDetails.get(notFoundApiKeyId) + ); + expectErrorFields( + "illegal_argument_exception", + "cannot update invalidated API key [" + invalidatedApiKey.id + "]", + errorDetails.get(invalidatedApiKey.id) + ); + expectMetadata(apiKeyExpectingUpdate.id, expectedApiKeyMetadata); + 
expectMetadata(apiKeyExpectingNoop.id, expectedApiKeyMetadata); + expectMetadata(invalidatedApiKey.id, metadataForInvalidatedKey); + doTestAuthenticationWithApiKey(apiKeyExpectingUpdate.name, apiKeyExpectingUpdate.id, apiKeyExpectingUpdate.encoded); + doTestAuthenticationWithApiKey(apiKeyExpectingNoop.name, apiKeyExpectingNoop.id, apiKeyExpectingNoop.encoded); + } + public void testGrantTargetCanUpdateApiKey() throws IOException { final var request = new Request("POST", "_security/api_key/grant"); request.setOptions( @@ -240,9 +288,14 @@ public void testGrantTargetCanUpdateApiKey() throws IOException { assertThat(apiKeyId, not(emptyString())); assertThat(apiKeyEncoded, not(emptyString())); - doTestUpdateApiKey(apiKeyName, apiKeyId, apiKeyEncoded, null); + if (randomBoolean()) { + doTestUpdateApiKey(apiKeyName, apiKeyId, apiKeyEncoded, null); + } else { + doTestUpdateApiKeyUsingBulkAction(apiKeyName, apiKeyId, apiKeyEncoded, null); + } } + @SuppressWarnings({ "unchecked" }) public void testGrantorCannotUpdateApiKeyOfGrantTarget() throws IOException { final var request = new Request("POST", "_security/api_key/grant"); final var apiKeyName = "test_api_key_password"; @@ -263,10 +316,32 @@ public void testGrantorCannotUpdateApiKeyOfGrantTarget() throws IOException { final var updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId); updateApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(Map.of(), XContentType.JSON).utf8ToString()); + final ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(updateApiKeyRequest)); assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); assertThat(e.getMessage(), containsString("no API key owned by requesting user found for ID [" + apiKeyId + "]")); + + // Bulk update also not allowed + final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); + bulkUpdateApiKeyRequest.setJsonEntity( + XContentTestUtils.convertToXContent(Map.of("ids", List.of(apiKeyId)), XContentType.JSON).utf8ToString() + ); + final Response bulkUpdateApiKeyResponse = adminClient().performRequest(bulkUpdateApiKeyRequest); + + assertOK(bulkUpdateApiKeyResponse); + final Map bulkUpdateApiKeyResponseMap = responseAsMap(bulkUpdateApiKeyResponse); + assertThat((List) bulkUpdateApiKeyResponseMap.get("updated"), empty()); + assertThat((List) bulkUpdateApiKeyResponseMap.get("noops"), empty()); + final Map errors = (Map) bulkUpdateApiKeyResponseMap.get("errors"); + assertEquals(1, errors.get("count")); + final Map> errorDetails = (Map>) errors.get("details"); + assertEquals(1, errorDetails.size()); + expectErrorFields( + "resource_not_found_exception", + "no API key owned by requesting user found for ID [" + apiKeyId + "]", + errorDetails.get(apiKeyId) + ); } private void doTestAuthenticationWithApiKey(final String apiKeyName, final String apiKeyId, final String apiKeyEncoded) @@ -297,7 +372,7 @@ private void doTestUpdateApiKey( : Map.of("metadata", expectedApiKeyMetadata); updateApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(updateApiKeyRequestBody, XContentType.JSON).utf8ToString()); - final Response updateApiKeyResponse = doUpdateUsingRandomAuthMethod(updateApiKeyRequest); + final Response updateApiKeyResponse = performRequestUsingRandomAuthMethod(updateApiKeyRequest); assertOK(updateApiKeyResponse); final Map updateApiKeyResponseMap = responseAsMap(updateApiKeyResponse); @@ -307,20 +382,76 @@ private void doTestUpdateApiKey( 
doTestAuthenticationWithApiKey(apiKeyName, apiKeyId, apiKeyEncoded); } - private Response doUpdateUsingRandomAuthMethod(Request updateApiKeyRequest) throws IOException { + @SuppressWarnings({ "unchecked" }) + private void doTestUpdateApiKeyUsingBulkAction( + final String apiKeyName, + final String apiKeyId, + final String apiKeyEncoded, + final Map oldMetadata + ) throws IOException { + final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); + final boolean updated = randomBoolean(); + final Map expectedApiKeyMetadata = updated ? Map.of("not", "returned (changed)", "foo", "bar") : oldMetadata; + final Map bulkUpdateApiKeyRequestBody = expectedApiKeyMetadata == null + ? Map.of("ids", List.of(apiKeyId)) + : Map.of("ids", List.of(apiKeyId), "metadata", expectedApiKeyMetadata); + bulkUpdateApiKeyRequest.setJsonEntity( + XContentTestUtils.convertToXContent(bulkUpdateApiKeyRequestBody, XContentType.JSON).utf8ToString() + ); + + final Response bulkUpdateApiKeyResponse = performRequestUsingRandomAuthMethod(bulkUpdateApiKeyRequest); + + assertOK(bulkUpdateApiKeyResponse); + final Map bulkUpdateApiKeyResponseMap = responseAsMap(bulkUpdateApiKeyResponse); + assertThat(bulkUpdateApiKeyResponseMap, not(hasKey("errors"))); + if (updated) { + assertThat((List) bulkUpdateApiKeyResponseMap.get("noops"), empty()); + assertThat((List) bulkUpdateApiKeyResponseMap.get("updated"), contains(apiKeyId)); + } else { + assertThat((List) bulkUpdateApiKeyResponseMap.get("updated"), empty()); + assertThat((List) bulkUpdateApiKeyResponseMap.get("noops"), contains(apiKeyId)); + } + expectMetadata(apiKeyId, expectedApiKeyMetadata == null ? Map.of() : expectedApiKeyMetadata); + // validate authentication still works after update + doTestAuthenticationWithApiKey(apiKeyName, apiKeyId, apiKeyEncoded); + } + + private Response performRequestUsingRandomAuthMethod(final Request request) throws IOException { final boolean useRunAs = randomBoolean(); if (useRunAs) { - updateApiKeyRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader(RUN_AS_USER_HEADER, MANAGE_OWN_API_KEY_USER)); - return adminClient().performRequest(updateApiKeyRequest); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader(RUN_AS_USER_HEADER, MANAGE_OWN_API_KEY_USER)); + return adminClient().performRequest(request); } else { - updateApiKeyRequest.setOptions( + request.setOptions( RequestOptions.DEFAULT.toBuilder() .addHeader("Authorization", headerFromRandomAuthMethod(MANAGE_OWN_API_KEY_USER, END_USER_PASSWORD)) ); - return client().performRequest(updateApiKeyRequest); + return client().performRequest(request); } } + private EncodedApiKey createApiKey(final String apiKeyName, final Map metadata) throws IOException { + final Map createApiKeyRequestBody = Map.of("name", apiKeyName, "metadata", metadata); + + final Request createApiKeyRequest = new Request("POST", "_security/api_key"); + createApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(createApiKeyRequestBody, XContentType.JSON).utf8ToString()); + createApiKeyRequest.setOptions( + RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", headerFromRandomAuthMethod(MANAGE_OWN_API_KEY_USER, END_USER_PASSWORD)) + ); + + final Response createApiKeyResponse = client().performRequest(createApiKeyRequest); + final Map createApiKeyResponseMap = responseAsMap(createApiKeyResponse); + final var apiKeyId = (String) createApiKeyResponseMap.get("id"); + final var apiKeyEncoded = (String) createApiKeyResponseMap.get("encoded"); + final var 
actualApiKeyName = (String) createApiKeyResponseMap.get("name"); + assertThat(apiKeyId, not(emptyString())); + assertThat(apiKeyEncoded, not(emptyString())); + assertThat(apiKeyName, equalTo(actualApiKeyName)); + + return new EncodedApiKey(apiKeyId, apiKeyEncoded, actualApiKeyName); + } + private String headerFromRandomAuthMethod(final String username, final SecureString password) throws IOException { final boolean useBearerTokenAuth = randomBoolean(); if (useBearerTokenAuth) { @@ -343,4 +474,12 @@ private void expectMetadata(final String apiKeyId, final Map exp assertThat(apiKeyResponse.getApiKeyInfos()[0].getMetadata(), equalTo(expectedMetadata)); } } + + private void expectErrorFields(final String type, final String reason, final Map rawError) { + assertNotNull(rawError); + assertEquals(type, rawError.get("type")); + assertEquals(reason, rawError.get("reason")); + } + + private record EncodedApiKey(String id, String encoded, String name) {} } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 52087742d92d4..1dda9e913f434 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -54,6 +54,9 @@ import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheResponse; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; @@ -124,6 +127,7 @@ import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; import static org.elasticsearch.xpack.security.Security.SECURITY_CRYPTO_THREAD_POOL_NAME; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -1445,7 +1449,7 @@ public void testSecurityIndexStateChangeWillInvalidateApiKeyCaches() throws Exce }); } - public void testUpdateApiKey() throws ExecutionException, InterruptedException, IOException { + public void testUpdateApiKeysForSingleKey() throws Exception { final Tuple> createdApiKey = createApiKey(TEST_USER_NAME, null); final var apiKeyId = createdApiKey.v1().getId(); final Map oldMetadata = createdApiKey.v2(); @@ -1463,7 +1467,7 @@ public void testUpdateApiKey() throws ExecutionException, InterruptedException, ); final var request = new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, ApiKeyTests.randomMetadata()); - final UpdateApiKeyResponse response = executeUpdateApiKey(TEST_USER_NAME, request); + final UpdateApiKeyResponse response = 
updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request); assertNotNull(response); // In this test, non-null roleDescriptors always result in an update since they either update the role name, or associated @@ -1522,7 +1526,126 @@ public void testUpdateApiKey() throws ExecutionException, InterruptedException, } } - public void testUpdateApiKeyAutoUpdatesUserFields() throws IOException, ExecutionException, InterruptedException { + public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, InterruptedException, IOException { + final Tuple, List>> apiKeys = createApiKeys( + TEST_USER_NAME, + randomIntBetween(3, 5), + null + ); + final List apiKeyIds = apiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList(); + final List newRoleDescriptors = randomValueOtherThan(null, this::randomRoleDescriptors); + final Map newMetadata = randomValueOtherThan(null, ApiKeyTests::randomMetadata); + + BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey( + TEST_USER_NAME, + new BulkUpdateApiKeyRequest(apiKeyIds, newRoleDescriptors, newMetadata) + ); + + assertNotNull(response); + assertThat(response.getErrorDetails(), anEmptyMap()); + final List allIds = Stream.concat(response.getUpdated().stream(), response.getNoops().stream()).toList(); + assertThat(allIds, containsInAnyOrder(apiKeyIds.toArray())); + // Role descriptor corresponding to SecuritySettingsSource.TEST_ROLE_YML + final var expectedLimitedByRoleDescriptors = Set.of( + new RoleDescriptor( + TEST_ROLE, + new String[] { "ALL" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").allowRestrictedIndices(true).privileges("ALL").build() }, + null + ) + ); + for (String apiKeyId : apiKeyIds) { + final Map doc = getApiKeyDocument(apiKeyId); + expectRoleDescriptorsForApiKey("role_descriptors", newRoleDescriptors, doc); + expectRoleDescriptorsForApiKey("limited_by_role_descriptors", expectedLimitedByRoleDescriptors, doc); + expectMetadataForApiKey(newMetadata, doc); + } + + // Check that bulk update works when there are no actual updates + final List newIds = new ArrayList<>(apiKeyIds); + // include not found ID to force error + final List notFoundIds = randomList( + 1, + 5, + () -> randomValueOtherThanMany(apiKeyIds::contains, () -> randomAlphaOfLength(10)) + ); + newIds.addAll(notFoundIds); + final BulkUpdateApiKeyRequest request = new BulkUpdateApiKeyRequest(shuffledList(newIds), newRoleDescriptors, newMetadata); + + response = executeBulkUpdateApiKey(TEST_USER_NAME, request); + + assertNotNull(response); + assertThat(response.getUpdated(), empty()); + assertEquals(apiKeyIds.size(), response.getNoops().size()); + assertThat(response.getNoops(), containsInAnyOrder(apiKeyIds.toArray())); + assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(notFoundIds.toArray())); + for (String apiKeyId : apiKeyIds) { + final Map doc = getApiKeyDocument(apiKeyId); + expectRoleDescriptorsForApiKey("role_descriptors", newRoleDescriptors, doc); + expectRoleDescriptorsForApiKey("limited_by_role_descriptors", expectedLimitedByRoleDescriptors, doc); + expectMetadataForApiKey(newMetadata, doc); + } + + // Check that bulk update works when some or all updates result in errors + final List invalidatedIds = randomNonEmptySubsetOf(apiKeyIds); + getSecurityClient().invalidateApiKeys(invalidatedIds.toArray(new String[0])); + final List expectedSuccessfulIds = apiKeyIds.stream().filter(i -> invalidatedIds.contains(i) == false).toList(); + final BulkUpdateApiKeyRequest 
requestWithSomeErrors = new BulkUpdateApiKeyRequest( + shuffledList(apiKeyIds), + randomValueOtherThan(null, this::randomRoleDescriptors), + randomValueOtherThan(null, ApiKeyTests::randomMetadata) + ); + + response = executeBulkUpdateApiKey(TEST_USER_NAME, requestWithSomeErrors); + + final List allSuccessfulIds = Stream.concat(response.getUpdated().stream(), response.getNoops().stream()).toList(); + assertThat(allSuccessfulIds, containsInAnyOrder(expectedSuccessfulIds.toArray())); + assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(invalidatedIds.toArray())); + } + + public void testBulkUpdateApiKeysWithDuplicates() throws ExecutionException, InterruptedException { + final Tuple, List>> apiKeys = createApiKeys( + TEST_USER_NAME, + randomIntBetween(3, 5), + null + ); + final List apiKeyIds = apiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList(); + final List newRoleDescriptors = randomValueOtherThan(null, this::randomRoleDescriptors); + final Map newMetadata = randomValueOtherThan(null, ApiKeyTests::randomMetadata); + final List idsWithDuplicates = shuffledList(Stream.concat(apiKeyIds.stream(), apiKeyIds.stream()).toList()); + assertEquals(idsWithDuplicates.size(), apiKeyIds.size() * 2); + + BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey( + TEST_USER_NAME, + new BulkUpdateApiKeyRequest(idsWithDuplicates, newRoleDescriptors, newMetadata) + ); + + assertNotNull(response); + assertThat(response.getErrorDetails(), anEmptyMap()); + final List allIds = Stream.concat(response.getUpdated().stream(), response.getNoops().stream()).toList(); + assertThat(allIds, containsInAnyOrder(apiKeyIds.toArray())); + + // Check not found errors reported for all unique IDs + final List notFoundIds = randomList( + 1, + 5, + () -> randomValueOtherThanMany(apiKeyIds::contains, () -> randomAlphaOfLength(10)) + ); + final List notFoundIdsWithDuplicates = shuffledList(Stream.concat(notFoundIds.stream(), notFoundIds.stream()).toList()); + + response = executeBulkUpdateApiKey( + TEST_USER_NAME, + new BulkUpdateApiKeyRequest(notFoundIdsWithDuplicates, newRoleDescriptors, newMetadata) + ); + + assertNotNull(response); + assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(notFoundIds.toArray())); + assertThat(response.getUpdated(), empty()); + assertThat(response.getNoops(), empty()); + } + + public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { // Create separate native realm user and role for user role change test final var nativeRealmUser = randomAlphaOfLengthBetween(5, 10); final var nativeRealmRole = randomAlphaOfLengthBetween(5, 10); @@ -1563,7 +1686,10 @@ public void testUpdateApiKeyAutoUpdatesUserFields() throws IOException, Executio newClusterPrivileges.toArray(new String[0]) ); - UpdateApiKeyResponse response = executeUpdateApiKey(nativeRealmUser, UpdateApiKeyRequest.usingApiKeyId(apiKeyId)); + UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction( + nativeRealmUser, + UpdateApiKeyRequest.usingApiKeyId(apiKeyId) + ); assertNotNull(response); assertTrue(response.isUpdated()); @@ -1582,7 +1708,7 @@ public void testUpdateApiKeyAutoUpdatesUserFields() throws IOException, Executio updateUser(updatedUser); // Update API key - response = executeUpdateApiKey(nativeRealmUser, UpdateApiKeyRequest.usingApiKeyId(apiKeyId)); + response = updateSingleApiKeyMaybeUsingBulkAction(nativeRealmUser, UpdateApiKeyRequest.usingApiKeyId(apiKeyId)); assertNotNull(response); assertTrue(response.isUpdated()); @@ -1598,24 +1724,24 @@ public void 
testUpdateApiKeyAutoUpdatesUserFields() throws IOException, Executio expectCreatorForApiKey(expectedCreator, updatedApiKeyDoc); } - public void testUpdateApiKeyNotFoundScenarios() throws ExecutionException, InterruptedException { + public void testUpdateApiKeysNotFoundScenarios() throws Exception { final Tuple> createdApiKey = createApiKey(TEST_USER_NAME, null); final var apiKeyId = createdApiKey.v1().getId(); final var expectedRoleDescriptor = new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null); final var request = new UpdateApiKeyRequest(apiKeyId, List.of(expectedRoleDescriptor), ApiKeyTests.randomMetadata()); // Validate can update own API key - final UpdateApiKeyResponse response = executeUpdateApiKey(TEST_USER_NAME, request); + final UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request); assertNotNull(response); assertTrue(response.isUpdated()); // Test not found exception on non-existent API key final var otherApiKeyId = randomValueOtherThan(apiKeyId, () -> randomAlphaOfLength(20)); - doTestUpdateApiKeyNotFound(new UpdateApiKeyRequest(otherApiKeyId, request.getRoleDescriptors(), request.getMetadata())); + doTestUpdateApiKeysNotFound(new UpdateApiKeyRequest(otherApiKeyId, request.getRoleDescriptors(), request.getMetadata())); // Test not found exception on other user's API key final Tuple> otherUsersApiKey = createApiKey("user_with_manage_api_key_role", null); - doTestUpdateApiKeyNotFound( + doTestUpdateApiKeysNotFound( new UpdateApiKeyRequest(otherUsersApiKey.v1().getId(), request.getRoleDescriptors(), request.getMetadata()) ); @@ -1635,12 +1761,12 @@ public void testUpdateApiKeyNotFoundScenarios() throws ExecutionException, Inter null, "all" ).v1().get(0); - doTestUpdateApiKeyNotFound( + doTestUpdateApiKeysNotFound( new UpdateApiKeyRequest(apiKeyForNativeRealmUser.getId(), request.getRoleDescriptors(), request.getMetadata()) ); } - public void testInvalidUpdateApiKeyScenarios() throws ExecutionException, InterruptedException { + public void testInvalidUpdateApiKeysScenarios() throws ExecutionException, InterruptedException { final List apiKeyPrivileges = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); // At a minimum include privilege to manage own API key to ensure no 403 apiKeyPrivileges.add(randomFrom("manage_api_key", "manage_own_api_key")); @@ -1650,7 +1776,7 @@ public void testInvalidUpdateApiKeyScenarios() throws ExecutionException, Interr final var roleDescriptor = new RoleDescriptor(randomAlphaOfLength(10), new String[] { "manage_own_api_key" }, null, null); final var request = new UpdateApiKeyRequest(apiKeyId, List.of(roleDescriptor), ApiKeyTests.randomMetadata()); - PlainActionFuture updateListener = new PlainActionFuture<>(); + final PlainActionFuture updateListener = new PlainActionFuture<>(); client().filterWithHeader( Collections.singletonMap( "Authorization", @@ -1683,14 +1809,10 @@ public void testInvalidUpdateApiKeyScenarios() throws ExecutionException, Interr assertThat(expirationDateUpdatedResponse.getResult(), is(DocWriteResponse.Result.UPDATED)); } - updateListener = new PlainActionFuture<>(); - final Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING)) + final var ex = expectThrowsWithUnwrappedExecutionException( + IllegalArgumentException.class, + () -> updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request) ); - client.execute(UpdateApiKeyAction.INSTANCE, request, 
updateListener); - final var ex = expectThrows(ExecutionException.class, updateListener::get); - - assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); if (invalidated) { assertThat(ex.getMessage(), containsString("cannot update invalidated API key [" + apiKeyId + "]")); } else { @@ -1698,12 +1820,11 @@ public void testInvalidUpdateApiKeyScenarios() throws ExecutionException, Interr } } - public void testUpdateApiKeyAccountsForSecurityDomains() throws ExecutionException, InterruptedException, IOException { + public void testUpdateApiKeysAccountsForSecurityDomains() throws Exception { final Tuple> createdApiKey = createApiKey(TEST_USER_NAME, null); final var apiKeyId = createdApiKey.v1().getId(); final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName(); - final PlainActionFuture listener = new PlainActionFuture<>(); final RealmConfig.RealmIdentifier creatorRealmOnCreatedApiKey = new RealmConfig.RealmIdentifier(FileRealmSettings.TYPE, "file"); final RealmConfig.RealmIdentifier otherRealmInDomain = AuthenticationTestHelper.randomRealmIdentifier(true); final var realmDomain = new RealmDomain( @@ -1726,11 +1847,14 @@ public void testUpdateApiKeyAccountsForSecurityDomains() throws ExecutionExcepti ) .build() ); - serviceWithNodeName.service().updateApiKey(authentication, UpdateApiKeyRequest.usingApiKeyId(apiKeyId), Set.of(), listener); - final UpdateApiKeyResponse response = listener.get(); + final BulkUpdateApiKeyResponse response = updateApiKeys( + serviceWithNodeName.service(), + authentication, + BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId), + Set.of() + ); - assertNotNull(response); - assertTrue(response.isUpdated()); + assertSingleUpdate(apiKeyId, response); final Map expectedCreator = new HashMap<>(); expectedCreator.put("principal", TEST_USER_NAME); expectedCreator.put("full_name", null); @@ -1743,7 +1867,7 @@ public void testUpdateApiKeyAccountsForSecurityDomains() throws ExecutionExcepti expectCreatorForApiKey(expectedCreator, getApiKeyDocument(apiKeyId)); } - public void testNoopUpdateApiKey() throws ExecutionException, InterruptedException, IOException { + public void testUpdateApiKeysNoopScenarios() throws Exception { final Tuple> createdApiKey = createApiKey(TEST_USER_NAME, null); final var apiKeyId = createdApiKey.v1().getId(); @@ -1754,7 +1878,7 @@ public void testNoopUpdateApiKey() throws ExecutionException, InterruptedExcepti // metadata updates are non-noops randomValueOtherThanMany(Objects::isNull, ApiKeyTests::randomMetadata) ); - UpdateApiKeyResponse response = executeUpdateApiKey(TEST_USER_NAME, initialRequest); + UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, initialRequest); assertNotNull(response); // First update is not noop, because role descriptors changed and possibly metadata assertTrue(response.isUpdated()); @@ -1767,13 +1891,13 @@ public void testNoopUpdateApiKey() throws ExecutionException, InterruptedExcepti .findFirst() .orElseThrow(); final int count = serviceWithNameForDoc1.getDocCache().count(); - response = executeUpdateApiKey(TEST_USER_NAME, initialRequest); + response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, initialRequest); assertNotNull(response); assertFalse(response.isUpdated()); assertEquals(count, serviceWithNameForDoc1.getDocCache().count()); // Update with empty request is a noop - response = executeUpdateApiKey(TEST_USER_NAME, UpdateApiKeyRequest.usingApiKeyId(apiKeyId)); + response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, 
UpdateApiKeyRequest.usingApiKeyId(apiKeyId)); assertNotNull(response); assertFalse(response.isUpdated()); @@ -1788,12 +1912,12 @@ public void testNoopUpdateApiKey() throws ExecutionException, InterruptedExcepti () -> RoleDescriptorTests.randomRoleDescriptor(false) ) ); - response = executeUpdateApiKey(TEST_USER_NAME, new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, null)); + response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, null)); assertNotNull(response); assertTrue(response.isUpdated()); // Update with re-ordered role descriptors is a noop - response = executeUpdateApiKey( + response = updateSingleApiKeyMaybeUsingBulkAction( TEST_USER_NAME, new UpdateApiKeyRequest(apiKeyId, List.of(newRoleDescriptors.get(1), newRoleDescriptors.get(0)), null) ); @@ -1801,7 +1925,7 @@ public void testNoopUpdateApiKey() throws ExecutionException, InterruptedExcepti assertFalse(response.isUpdated()); // Update with different metadata is not a noop - response = executeUpdateApiKey( + response = updateSingleApiKeyMaybeUsingBulkAction( TEST_USER_NAME, new UpdateApiKeyRequest( apiKeyId, @@ -1816,7 +1940,6 @@ public void testNoopUpdateApiKey() throws ExecutionException, InterruptedExcepti // First, ensure that the user role descriptors alone do *not* cause an update, so we can test that we correctly perform the noop // check when we update creator info final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName(); - PlainActionFuture listener = new PlainActionFuture<>(); // Role descriptor corresponding to SecuritySettingsSource.TEST_ROLE_YML, i.e., should not result in update final Set oldUserRoleDescriptors = Set.of( new RoleDescriptor( @@ -1827,19 +1950,18 @@ public void testNoopUpdateApiKey() throws ExecutionException, InterruptedExcepti null ) ); - serviceWithNodeName.service() - .updateApiKey( + assertSingleNoop( + apiKeyId, + updateApiKeys( + serviceWithNodeName.service(), Authentication.newRealmAuthentication( new User(TEST_USER_NAME, TEST_ROLE), new Authentication.RealmRef("file", "file", serviceWithNodeName.nodeName()) ), - UpdateApiKeyRequest.usingApiKeyId(apiKeyId), - oldUserRoleDescriptors, - listener - ); - response = listener.get(); - assertNotNull(response); - assertFalse(response.isUpdated()); + BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId), + oldUserRoleDescriptors + ) + ); final User updatedUser = AuthenticationTestHelper.userWithRandomMetadataAndDetails(TEST_USER_NAME, TEST_ROLE); final RealmConfig.RealmIdentifier creatorRealmOnCreatedApiKey = new RealmConfig.RealmIdentifier(FileRealmSettings.TYPE, "file"); final boolean noUserChanges = updatedUser.equals(new User(TEST_USER_NAME, TEST_ROLE)); @@ -1868,15 +1990,18 @@ public void testNoopUpdateApiKey() throws ExecutionException, InterruptedExcepti Authentication::isApiKey, () -> AuthenticationTestHelper.builder().user(updatedUser).realmRef(realmRef).build() ); - listener = new PlainActionFuture<>(); - serviceWithNodeName.service() - .updateApiKey(authentication, UpdateApiKeyRequest.usingApiKeyId(apiKeyId), oldUserRoleDescriptors, listener); - response = listener.get(); - assertNotNull(response); - assertTrue(response.isUpdated()); + assertSingleUpdate( + apiKeyId, + updateApiKeys( + serviceWithNodeName.service(), + authentication, + BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId), + oldUserRoleDescriptors + ) + ); } - public void testUpdateApiKeyAutoUpdatesLegacySuperuserRoleDescriptor() throws ExecutionException, InterruptedException, IOException { + 
public void testUpdateApiKeysAutoUpdatesLegacySuperuserRoleDescriptor() throws Exception { final Tuple> createdApiKey = createApiKey(TEST_USER_NAME, null); final var apiKeyId = createdApiKey.v1().getId(); final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName(); @@ -1885,35 +2010,43 @@ public void testUpdateApiKeyAutoUpdatesLegacySuperuserRoleDescriptor() throws Ex new Authentication.RealmRef("file", "file", serviceWithNodeName.nodeName()) ); final Set legacySuperuserRoleDescriptor = Set.of(ApiKeyService.LEGACY_SUPERUSER_ROLE_DESCRIPTOR); - PlainActionFuture listener = new PlainActionFuture<>(); // Force set user role descriptors to 7.x legacy superuser role descriptors - serviceWithNodeName.service() - .updateApiKey(authentication, UpdateApiKeyRequest.usingApiKeyId(apiKeyId), legacySuperuserRoleDescriptor, listener); - UpdateApiKeyResponse response = listener.get(); - assertNotNull(response); - assertTrue(response.isUpdated()); + assertSingleUpdate( + apiKeyId, + updateApiKeys( + serviceWithNodeName.service(), + authentication, + BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId), + legacySuperuserRoleDescriptor + ) + ); expectRoleDescriptorsForApiKey("limited_by_role_descriptors", legacySuperuserRoleDescriptor, getApiKeyDocument(apiKeyId)); final Set currentSuperuserRoleDescriptors = Set.of(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); - PlainActionFuture listener2 = new PlainActionFuture<>(); - serviceWithNodeName.service() - .updateApiKey(authentication, UpdateApiKeyRequest.usingApiKeyId(apiKeyId), currentSuperuserRoleDescriptors, listener2); - response = listener2.get(); - assertNotNull(response); // The first request is not a noop because we are auto-updating the legacy role descriptors to 8.x role descriptors - assertTrue(response.isUpdated()); + assertSingleUpdate( + apiKeyId, + updateApiKeys( + serviceWithNodeName.service(), + authentication, + BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId), + currentSuperuserRoleDescriptors + ) + ); expectRoleDescriptorsForApiKey("limited_by_role_descriptors", currentSuperuserRoleDescriptors, getApiKeyDocument(apiKeyId)); - - PlainActionFuture listener3 = new PlainActionFuture<>(); - serviceWithNodeName.service() - .updateApiKey(authentication, UpdateApiKeyRequest.usingApiKeyId(apiKeyId), currentSuperuserRoleDescriptors, listener3); - response = listener3.get(); - assertNotNull(response); // Second update is noop because role descriptors were auto-updated by the previous request - assertFalse(response.isUpdated()); + assertSingleNoop( + apiKeyId, + updateApiKeys( + serviceWithNodeName.service(), + authentication, + BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId), + currentSuperuserRoleDescriptors + ) + ); } - public void testUpdateApiKeyClearsApiKeyDocCache() throws IOException, ExecutionException, InterruptedException { + public void testUpdateApiKeysClearsApiKeyDocCache() throws Exception { final List services = Arrays.stream(internalCluster().getNodeNames()) .map(n -> new ServiceWithNodeName(internalCluster().getInstance(ApiKeyService.class, n), n)) .toList(); @@ -1948,17 +2081,16 @@ public void testUpdateApiKeyClearsApiKeyDocCache() throws IOException, Execution final int serviceForDoc2AuthCacheCount = serviceForDoc2.getApiKeyAuthCache().count(); // Update the first key - final PlainActionFuture listener = new PlainActionFuture<>(); - final Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); - client.execute( - 
UpdateApiKeyAction.INSTANCE, - // Set metadata to ensure update - new UpdateApiKeyRequest(apiKey1.v1(), List.of(), Map.of(randomAlphaOfLength(5), randomAlphaOfLength(10))), - listener + final UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction( + ES_TEST_ROOT_USER, + new UpdateApiKeyRequest( + apiKey1.v1(), + List.of(), + // Set metadata to ensure update + Map.of(randomAlphaOfLength(5), randomAlphaOfLength(10)) + ) ); - final var response = listener.get(); + assertNotNull(response); assertTrue(response.isUpdated()); @@ -2002,14 +2134,11 @@ private List randomRoleDescriptors() { }; } - private void doTestUpdateApiKeyNotFound(UpdateApiKeyRequest request) { - final PlainActionFuture listener = new PlainActionFuture<>(); - final Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING)) + private void doTestUpdateApiKeysNotFound(final UpdateApiKeyRequest request) { + final var ex = expectThrowsWithUnwrappedExecutionException( + ResourceNotFoundException.class, + () -> updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request) ); - client.execute(UpdateApiKeyAction.INSTANCE, request, listener); - final var ex = expectThrows(ExecutionException.class, listener::get); - assertThat(ex.getCause(), instanceOf(ResourceNotFoundException.class)); assertThat(ex.getMessage(), containsString("no API key owned by requesting user found for ID [" + request.getId() + "]")); } @@ -2338,16 +2467,80 @@ private Client getClientForRunAsUser() { ); } - private UpdateApiKeyResponse executeUpdateApiKey(final String username, final UpdateApiKeyRequest request) throws InterruptedException, - ExecutionException { - final var listener = new PlainActionFuture(); + private BulkUpdateApiKeyResponse updateApiKeys( + final ApiKeyService service, + final Authentication authentication, + final BulkUpdateApiKeyRequest request, + final Set userRoleDescriptors + ) throws Exception { + final PlainActionFuture listener = new PlainActionFuture<>(); + service.updateApiKeys(authentication, request, userRoleDescriptors, listener); + return listener.get(); + } + + private void assertSingleUpdate(final String apiKeyId, final BulkUpdateApiKeyResponse response) { + assertNotNull(response); + assertThat(response.getErrorDetails(), anEmptyMap()); + assertThat(response.getNoops(), empty()); + assertThat(response.getUpdated(), contains(apiKeyId)); + } + + private void assertSingleNoop(final String apiKeyId, final BulkUpdateApiKeyResponse response) { + assertNotNull(response); + assertThat(response.getErrorDetails(), anEmptyMap()); + assertThat(response.getNoops(), contains(apiKeyId)); + assertThat(response.getUpdated(), empty()); + } + + private void assertSingleError(final String apiKeyId, final BulkUpdateApiKeyResponse response) { + assertNotNull(response); + assertThat(response.getErrorDetails().keySet(), contains(apiKeyId)); + assertThat(response.getUpdated(), empty()); + assertThat(response.getNoops(), empty()); + } + + private UpdateApiKeyResponse updateSingleApiKeyMaybeUsingBulkAction(final String username, final UpdateApiKeyRequest request) + throws Exception { + final boolean useBulkAction = randomBoolean(); + if (useBulkAction) { + final BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey( + username, + new BulkUpdateApiKeyRequest(List.of(request.getId()), request.getRoleDescriptors(), request.getMetadata()) + ); + return toUpdateApiKeyResponse(request.getId(), response); + } else { + final var listener = new 
PlainActionFuture(); + final Client client = client().filterWithHeader( + Collections.singletonMap("Authorization", basicAuthHeaderValue(username, TEST_PASSWORD_SECURE_STRING)) + ); + client.execute(UpdateApiKeyAction.INSTANCE, request, listener); + return listener.get(); + } + } + + private BulkUpdateApiKeyResponse executeBulkUpdateApiKey(final String username, final BulkUpdateApiKeyRequest request) + throws ExecutionException, InterruptedException { + final var listener = new PlainActionFuture(); final Client client = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue(username, TEST_PASSWORD_SECURE_STRING)) ); - client.execute(UpdateApiKeyAction.INSTANCE, request, listener); + client.execute(BulkUpdateApiKeyAction.INSTANCE, request, listener); return listener.get(); } + private UpdateApiKeyResponse toUpdateApiKeyResponse(final String apiKeyId, final BulkUpdateApiKeyResponse response) throws Exception { + if (response.getErrorDetails().isEmpty() == false) { + assertSingleError(apiKeyId, response); + throw response.getErrorDetails().values().iterator().next(); + } else if (response.getUpdated().isEmpty() == false) { + assertSingleUpdate(apiKeyId, response); + return new UpdateApiKeyResponse(true); + } else { + assertSingleNoop(apiKeyId, response); + return new UpdateApiKeyResponse(false); + } + } + private void assertErrorMessage(final ElasticsearchSecurityException ese, String action, String userName, String apiKeyId) { assertThat( ese, @@ -2364,4 +2557,14 @@ private void assertErrorMessage(final ElasticsearchSecurityException ese, String assertThat(ese, throwableWithMessage(containsString(", this action is granted by the cluster privileges ["))); assertThat(ese, throwableWithMessage(containsString("manage_api_key,manage_security,all]"))); } + + private static T expectThrowsWithUnwrappedExecutionException(Class expectedType, ThrowingRunnable runnable) { + final var ex = expectThrowsAnyOf(List.of(expectedType, ExecutionException.class), runnable); + if (ex instanceof ExecutionException) { + assertThat(ex.getCause(), instanceOf(expectedType)); + return expectedType.cast(ex.getCause()); + } else { + return expectedType.cast(ex); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 5345145cac1b5..1b829098a0188 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -94,6 +94,7 @@ import org.elasticsearch.xpack.core.security.SecuritySettings; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -176,6 +177,7 @@ import org.elasticsearch.xpack.core.ssl.rest.RestGetCertificateInfoAction; import org.elasticsearch.xpack.security.action.TransportClearSecurityCacheAction; import org.elasticsearch.xpack.security.action.TransportDelegatePkiAuthenticationAction; +import org.elasticsearch.xpack.security.action.apikey.TransportBulkUpdateApiKeyAction; import 
org.elasticsearch.xpack.security.action.apikey.TransportCreateApiKeyAction; import org.elasticsearch.xpack.security.action.apikey.TransportGetApiKeyAction; import org.elasticsearch.xpack.security.action.apikey.TransportGrantApiKeyAction; @@ -273,6 +275,7 @@ import org.elasticsearch.xpack.security.rest.SecurityRestFilter; import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction; import org.elasticsearch.xpack.security.rest.action.RestDelegatePkiAuthenticationAction; +import org.elasticsearch.xpack.security.rest.action.apikey.RestBulkUpdateApiKeyAction; import org.elasticsearch.xpack.security.rest.action.apikey.RestClearApiKeyCacheAction; import org.elasticsearch.xpack.security.rest.action.apikey.RestCreateApiKeyAction; import org.elasticsearch.xpack.security.rest.action.apikey.RestGetApiKeyAction; @@ -1227,6 +1230,7 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(GetApiKeyAction.INSTANCE, TransportGetApiKeyAction.class), new ActionHandler<>(QueryApiKeyAction.INSTANCE, TransportQueryApiKeyAction.class), new ActionHandler<>(UpdateApiKeyAction.INSTANCE, TransportUpdateApiKeyAction.class), + new ActionHandler<>(BulkUpdateApiKeyAction.INSTANCE, TransportBulkUpdateApiKeyAction.class), new ActionHandler<>(DelegatePkiAuthenticationAction.INSTANCE, TransportDelegatePkiAuthenticationAction.class), new ActionHandler<>(CreateServiceAccountTokenAction.INSTANCE, TransportCreateServiceAccountTokenAction.class), new ActionHandler<>(DeleteServiceAccountTokenAction.INSTANCE, TransportDeleteServiceAccountTokenAction.class), @@ -1305,6 +1309,7 @@ public List getRestHandlers( new RestDeletePrivilegesAction(settings, getLicenseState()), new RestCreateApiKeyAction(settings, getLicenseState()), new RestUpdateApiKeyAction(settings, getLicenseState()), + new RestBulkUpdateApiKeyAction(settings, getLicenseState()), new RestGrantApiKeyAction(settings, getLicenseState()), new RestInvalidateApiKeyAction(settings, getLicenseState()), new RestGetApiKeyAction(settings, getLicenseState()), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java new file mode 100644 index 0000000000000..d0e00970a9d4a --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.action.apikey; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.apikey.BaseUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.security.authc.support.ApiKeyUserRoleDescriptorResolver; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; + +import java.util.Set; + +public abstract class TransportBaseUpdateApiKeyAction extends + HandledTransportAction { + + private final SecurityContext securityContext; + private final ApiKeyUserRoleDescriptorResolver resolver; + + protected TransportBaseUpdateApiKeyAction( + final String actionName, + final TransportService transportService, + final ActionFilters actionFilters, + final Writeable.Reader requestReader, + final SecurityContext context, + final CompositeRolesStore rolesStore, + final NamedXContentRegistry xContentRegistry + ) { + super(actionName, transportService, actionFilters, requestReader); + this.securityContext = context; + this.resolver = new ApiKeyUserRoleDescriptorResolver(rolesStore, xContentRegistry); + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + final var authentication = securityContext.getAuthentication(); + if (authentication == null) { + listener.onFailure(new IllegalStateException("authentication is required")); + return; + } else if (authentication.isApiKey()) { + listener.onFailure( + new IllegalArgumentException("authentication via API key not supported: only the owner user can update an API key") + ); + return; + } + + resolver.resolveUserRoleDescriptors( + authentication, + ActionListener.wrap( + roleDescriptors -> doExecuteUpdate(task, request, authentication, roleDescriptors, listener), + listener::onFailure + ) + ); + } + + abstract void doExecuteUpdate( + Task task, + Request request, + Authentication authentication, + Set roleDescriptors, + ActionListener listener + ); +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBulkUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBulkUpdateApiKeyAction.java new file mode 100644 index 0000000000000..7d3432cef8314 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBulkUpdateApiKeyAction.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.action.apikey; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.security.authc.ApiKeyService; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; + +import java.util.Set; + +public final class TransportBulkUpdateApiKeyAction extends TransportBaseUpdateApiKeyAction< + BulkUpdateApiKeyRequest, + BulkUpdateApiKeyResponse> { + + private final ApiKeyService apiKeyService; + + @Inject + public TransportBulkUpdateApiKeyAction( + final TransportService transportService, + final ActionFilters actionFilters, + final ApiKeyService apiKeyService, + final SecurityContext context, + final CompositeRolesStore rolesStore, + final NamedXContentRegistry xContentRegistry + ) { + super( + BulkUpdateApiKeyAction.NAME, + transportService, + actionFilters, + BulkUpdateApiKeyRequest::new, + context, + rolesStore, + xContentRegistry + ); + this.apiKeyService = apiKeyService; + } + + @Override + void doExecuteUpdate( + final Task task, + final BulkUpdateApiKeyRequest request, + final Authentication authentication, + final Set roleDescriptors, + final ActionListener listener + ) { + apiKeyService.updateApiKeys(authentication, request, roleDescriptors, listener); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java index 6b28f7d601420..19bfeb7ff378b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java @@ -9,24 +9,27 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import 
org.elasticsearch.xpack.security.authc.ApiKeyService; -import org.elasticsearch.xpack.security.authc.support.ApiKeyUserRoleDescriptorResolver; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; -public final class TransportUpdateApiKeyAction extends HandledTransportAction { +import java.util.Map; +import java.util.Set; + +public final class TransportUpdateApiKeyAction extends TransportBaseUpdateApiKeyAction { - private final SecurityContext securityContext; private final ApiKeyService apiKeyService; - private final ApiKeyUserRoleDescriptorResolver resolver; @Inject public TransportUpdateApiKeyAction( @@ -37,31 +40,56 @@ public TransportUpdateApiKeyAction( final CompositeRolesStore rolesStore, final NamedXContentRegistry xContentRegistry ) { - super(UpdateApiKeyAction.NAME, transportService, actionFilters, UpdateApiKeyRequest::new); - this.securityContext = context; + super(UpdateApiKeyAction.NAME, transportService, actionFilters, UpdateApiKeyRequest::new, context, rolesStore, xContentRegistry); this.apiKeyService = apiKeyService; - this.resolver = new ApiKeyUserRoleDescriptorResolver(rolesStore, xContentRegistry); } @Override - protected void doExecute(Task task, UpdateApiKeyRequest request, ActionListener listener) { - final var authentication = securityContext.getAuthentication(); - if (authentication == null) { - listener.onFailure(new IllegalStateException("authentication is required")); - return; - } else if (authentication.isApiKey()) { - listener.onFailure( - new IllegalArgumentException("authentication via API key not supported: only the owner user can update an API key") + void doExecuteUpdate( + final Task task, + final UpdateApiKeyRequest request, + final Authentication authentication, + final Set roleDescriptors, + final ActionListener listener + ) { + apiKeyService.updateApiKeys( + authentication, + BulkUpdateApiKeyRequest.wrap(request), + roleDescriptors, + ActionListener.wrap(bulkResponse -> listener.onResponse(toSingleResponse(request.getId(), bulkResponse)), listener::onFailure) + ); + } + + private UpdateApiKeyResponse toSingleResponse(final String apiKeyId, final BulkUpdateApiKeyResponse response) throws Exception { + if (response.getTotalResultCount() != 1) { + throw new IllegalStateException( + "single result required for single API key update but result count was [" + response.getTotalResultCount() + "]" ); - return; } + if (response.getErrorDetails().isEmpty() == false) { + final Map.Entry errorEntry = response.getErrorDetails().entrySet().iterator().next(); + if (errorEntry.getKey().equals(apiKeyId) == false) { + throwIllegalStateExceptionOnIdMismatch(apiKeyId, errorEntry.getKey()); + } + throw errorEntry.getValue(); + } else if (response.getUpdated().isEmpty() == false) { + final String updatedId = response.getUpdated().get(0); + if (updatedId.equals(apiKeyId) == false) { + throwIllegalStateExceptionOnIdMismatch(apiKeyId, updatedId); + } + return new UpdateApiKeyResponse(true); + } else { + final String noopId = response.getNoops().get(0); + if (noopId.equals(apiKeyId) == false) { + throwIllegalStateExceptionOnIdMismatch(apiKeyId, noopId); + } + return new UpdateApiKeyResponse(false); + } + } - resolver.resolveUserRoleDescriptors( - authentication, - ActionListener.wrap( - roleDescriptors -> apiKeyService.updateApiKey(authentication, request, roleDescriptors, listener), - listener::onFailure - ) - ); + private void throwIllegalStateExceptionOnIdMismatch(final String requestId, final String responseId) { + final String message = "response ID [" 
+ responseId + "] does not match request ID [" + requestId + "] for single API key update"; + assert false : message; + throw new IllegalStateException(message); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 2279db42db91d..a9e1a116292d5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -83,13 +83,14 @@ import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheRequest; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheResponse; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; +import org.elasticsearch.xpack.core.security.action.apikey.BaseUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyResponse; -import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; @@ -355,11 +356,11 @@ private void createApiKeyAndIndexIt( })); } - public void updateApiKey( + public void updateApiKeys( final Authentication authentication, - final UpdateApiKeyRequest request, + final BulkUpdateApiKeyRequest request, final Set userRoleDescriptors, - final ActionListener listener + final ActionListener listener ) { ensureEnabled(); @@ -373,25 +374,75 @@ public void updateApiKey( return; } - logger.debug("Updating API key [{}]", request.getId()); - - findVersionedApiKeyDocsForSubject(authentication, new String[] { request.getId() }, ActionListener.wrap((versionedDocs) -> { - final var apiKeyId = request.getId(); + logger.debug("Updating [{}] API keys", request.getIds().size()); + findVersionedApiKeyDocsForSubject( + authentication, + request.getIds().toArray(new String[0]), + ActionListener.wrap( + versionedDocs -> updateApiKeys(authentication, request, userRoleDescriptors, versionedDocs, listener), + ex -> listener.onFailure(traceLog("bulk update", ex)) + ) + ); + } - if (versionedDocs.isEmpty()) { - throw new ResourceNotFoundException("no API key owned by requesting user found for ID [" + apiKeyId + "]"); + private void updateApiKeys( + final Authentication authentication, + final BulkUpdateApiKeyRequest request, + final Set userRoleDescriptors, + final Collection targetVersionedDocs, + final ActionListener listener + ) { + logger.trace("Found [{}] API keys of [{}] requested for update", targetVersionedDocs.size(), request.getIds().size()); + assert targetVersionedDocs.size() <= request.getIds().size() + : "more docs were found for update than were requested. 
found: " + + targetVersionedDocs.size() + + " requested: " + + request.getIds().size(); + + final BulkUpdateApiKeyResponse.Builder responseBuilder = BulkUpdateApiKeyResponse.builder(); + final BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); + for (VersionedApiKeyDoc versionedDoc : targetVersionedDocs) { + final String apiKeyId = versionedDoc.id(); + try { + validateForUpdate(apiKeyId, authentication, versionedDoc.doc()); + final IndexRequest indexRequest = maybeBuildIndexRequest(versionedDoc, authentication, request, userRoleDescriptors); + final boolean isNoop = indexRequest == null; + if (isNoop) { + logger.debug("Detected noop update request for API key [{}]. Skipping index request", apiKeyId); + responseBuilder.noop(apiKeyId); + } else { + bulkRequestBuilder.add(indexRequest); + } + } catch (Exception ex) { + responseBuilder.error(apiKeyId, traceLog("prepare index request for update", ex)); } + } + addErrorsForNotFoundApiKeys(responseBuilder, targetVersionedDocs, request.getIds()); + if (bulkRequestBuilder.numberOfActions() == 0) { + logger.trace("No bulk request execution necessary for API key update"); + listener.onResponse(responseBuilder.build()); + return; + } - final VersionedApiKeyDoc versionedDoc = singleDoc(apiKeyId, versionedDocs); - - validateCurrentApiKeyDocForUpdate(apiKeyId, authentication, versionedDoc.doc()); - - doUpdateApiKey(authentication, request, userRoleDescriptors, versionedDoc, listener); - }, ex -> listener.onFailure(traceLog("update", ex)))); + logger.trace("Executing bulk request to update [{}] API keys", bulkRequestBuilder.numberOfActions()); + bulkRequestBuilder.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + securityIndex.prepareIndexIfNeededThenExecute( + ex -> listener.onFailure(traceLog("prepare security index before update", ex)), + () -> executeAsyncWithOrigin( + client.threadPool().getThreadContext(), + SECURITY_ORIGIN, + bulkRequestBuilder.request(), + ActionListener.wrap( + bulkResponse -> buildResponseAndClearCache(bulkResponse, responseBuilder, listener), + ex -> listener.onFailure(traceLog("execute bulk request for update", ex)) + ), + client::bulk + ) + ); } // package-private for testing - void validateCurrentApiKeyDocForUpdate(String apiKeyId, Authentication authentication, ApiKeyDoc apiKeyDoc) { + void validateForUpdate(final String apiKeyId, final Authentication authentication, final ApiKeyDoc apiKeyDoc) { assert authentication.getEffectiveSubject().getUser().principal().equals(apiKeyDoc.creator.get("principal")); if (apiKeyDoc.invalidated) { @@ -446,13 +497,14 @@ static XContentBuilder newDocument( */ @Nullable XContentBuilder maybeBuildUpdatedDocument( + final String apiKeyId, final ApiKeyDoc currentApiKeyDoc, final Version targetDocVersion, final Authentication authentication, - final UpdateApiKeyRequest request, + final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors ) throws IOException { - if (isNoop(currentApiKeyDoc, targetDocVersion, authentication, request, userRoleDescriptors)) { + if (isNoop(apiKeyId, currentApiKeyDoc, targetDocVersion, authentication, request, userRoleDescriptors)) { return null; } @@ -502,10 +554,11 @@ XContentBuilder maybeBuildUpdatedDocument( } private boolean isNoop( + final String apiKeyId, final ApiKeyDoc apiKeyDoc, final Version targetDocVersion, final Authentication authentication, - final UpdateApiKeyRequest request, + final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors ) { if (apiKeyDoc.version != targetDocVersion.id) { @@ -557,11 +610,7 @@ private boolean 
isNoop( final List newRoleDescriptors = request.getRoleDescriptors(); if (newRoleDescriptors != null) { - final List currentRoleDescriptors = parseRoleDescriptorsBytes( - request.getId(), - apiKeyDoc.roleDescriptorsBytes, - false - ); + final List currentRoleDescriptors = parseRoleDescriptorsBytes(apiKeyId, apiKeyDoc.roleDescriptorsBytes, false); if (false == (newRoleDescriptors.size() == currentRoleDescriptors.size() && Set.copyOf(newRoleDescriptors).containsAll(currentRoleDescriptors))) { return false; @@ -570,7 +619,7 @@ private boolean isNoop( assert userRoleDescriptors != null; final List currentLimitedByRoleDescriptors = parseRoleDescriptorsBytes( - request.getId(), + apiKeyId, apiKeyDoc.limitedByRoleDescriptorsBytes, // We want the 7.x `LEGACY_SUPERUSER_ROLE_DESCRIPTOR` role descriptor to be returned here to auto-update // `LEGACY_SUPERUSER_ROLE_DESCRIPTOR` to `ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR`, when we update a 7.x API key. @@ -1064,32 +1113,37 @@ public void logRemovedField(String parserName, Supplier locati } } - private void doUpdateApiKey( - final Authentication authentication, - final UpdateApiKeyRequest request, - final Set userRoleDescriptors, + /** + * @return `null` if the update is a noop, i.e., if no changes to `currentApiKeyDoc` are required + */ + @Nullable + private IndexRequest maybeBuildIndexRequest( final VersionedApiKeyDoc currentVersionedDoc, - final ActionListener listener + final Authentication authentication, + final BaseUpdateApiKeyRequest request, + final Set userRoleDescriptors ) throws IOException { - logger.trace( - "Building update request for API key doc [{}] with seqNo [{}] and primaryTerm [{}]", - request.getId(), - currentVersionedDoc.seqNo(), - currentVersionedDoc.primaryTerm() - ); + if (logger.isTraceEnabled()) { + logger.trace( + "Building index request for update of API key doc [{}] with seqNo [{}] and primaryTerm [{}]", + currentVersionedDoc.id(), + currentVersionedDoc.seqNo(), + currentVersionedDoc.primaryTerm() + ); + } final var targetDocVersion = clusterService.state().nodes().getMinNodeVersion(); final var currentDocVersion = Version.fromId(currentVersionedDoc.doc().version); assert currentDocVersion.onOrBefore(targetDocVersion) : "current API key doc version must be on or before target version"; - if (currentDocVersion.before(targetDocVersion)) { + if (logger.isDebugEnabled() && currentDocVersion.before(targetDocVersion)) { logger.debug( "API key update for [{}] will update version from [{}] to [{}]", - request.getId(), + currentVersionedDoc.id(), currentDocVersion, targetDocVersion ); } - final XContentBuilder builder = maybeBuildUpdatedDocument( + currentVersionedDoc.id(), currentVersionedDoc.doc(), targetDocVersion, authentication, @@ -1097,33 +1151,33 @@ private void doUpdateApiKey( userRoleDescriptors ); final boolean isNoop = builder == null; - if (isNoop) { - logger.debug("Detected noop update request for API key [{}]. Skipping index request.", request.getId()); - listener.onResponse(new UpdateApiKeyResponse(false)); + return isNoop + ? 
null + : client.prepareIndex(SECURITY_MAIN_ALIAS) + .setId(currentVersionedDoc.id()) + .setSource(builder) + .setIfSeqNo(currentVersionedDoc.seqNo()) + .setIfPrimaryTerm(currentVersionedDoc.primaryTerm()) + .setOpType(DocWriteRequest.OpType.INDEX) + .request(); + } + + private void addErrorsForNotFoundApiKeys( + final BulkUpdateApiKeyResponse.Builder responseBuilder, + final Collection foundDocs, + final List requestedIds + ) { + // Short-circuiting by size is safe: `foundDocs` only contains unique IDs of those requested. Same size here necessarily implies + // same content + if (foundDocs.size() == requestedIds.size()) { return; } - - final IndexRequest indexRequest = client.prepareIndex(SECURITY_MAIN_ALIAS) - .setId(request.getId()) - .setSource(builder) - .setIfSeqNo(currentVersionedDoc.seqNo()) - .setIfPrimaryTerm(currentVersionedDoc.primaryTerm()) - .setOpType(DocWriteRequest.OpType.INDEX) - .request(); - logger.trace("Executing index request to update API key [{}]", request.getId()); - securityIndex.prepareIndexIfNeededThenExecute( - ex -> listener.onFailure(traceLog("prepare security index before update", ex)), - () -> executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - SECURITY_ORIGIN, - client.prepareBulk().add(indexRequest).setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).request(), - ActionListener.wrap( - bulkResponse -> translateResponseAndClearCache(request.getId(), bulkResponse, listener), - ex -> listener.onFailure(traceLog("execute bulk request for update", ex)) - ), - client::bulk - ) - ); + final Set foundIds = foundDocs.stream().map(VersionedApiKeyDoc::id).collect(Collectors.toUnmodifiableSet()); + for (String id : requestedIds) { + if (foundIds.contains(id) == false) { + responseBuilder.error(id, new ResourceNotFoundException("no API key owned by requesting user found for ID [" + id + "]")); + } + } } /** @@ -1359,35 +1413,25 @@ private void indexInvalidation( } } - private void translateResponseAndClearCache( - final String apiKeyId, + private void buildResponseAndClearCache( final BulkResponse bulkResponse, - final ActionListener listener + final BulkUpdateApiKeyResponse.Builder responseBuilder, + final ActionListener listener ) { - final BulkItemResponse[] elements = bulkResponse.getItems(); - assert elements.length == 1 : "expected single item in bulk index response for API key update"; - final var bulkItemResponse = elements[0]; - if (bulkItemResponse.isFailed()) { - listener.onFailure(bulkItemResponse.getFailure().getCause()); - } else { - assert bulkItemResponse.getResponse().getId().equals(apiKeyId); - // Since we made an index request against an existing document, we can't get a NOOP or CREATED here - assert bulkItemResponse.getResponse().getResult() == DocWriteResponse.Result.UPDATED; - clearApiKeyDocCache(apiKeyId, new UpdateApiKeyResponse(true), listener); - } - } - - private static VersionedApiKeyDoc singleDoc(final String apiKeyId, final Collection elements) { - if (elements.size() != 1) { - final var message = "expected single API key doc with ID [" - + apiKeyId - + "] to be found for update but found [" - + elements.size() - + "]"; - assert false : message; - throw new IllegalStateException(message); + for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) { + final String apiKeyId = bulkItemResponse.getId(); + if (bulkItemResponse.isFailed()) { + responseBuilder.error( + apiKeyId, + new ElasticsearchException("bulk request execution failure", bulkItemResponse.getFailure().getCause()) + ); + } else { + // Since we made an index 
request against an existing document, we can't get a NOOP or CREATED here + assert bulkItemResponse.getResponse().getResult() == DocWriteResponse.Result.UPDATED; + responseBuilder.updated(apiKeyId); + } } - return elements.iterator().next(); + clearApiKeyDocCache(responseBuilder.build(), listener); } private static void addLimitedByRoleDescriptors(final XContentBuilder builder, final Set limitedByRoleDescriptors) @@ -1446,8 +1490,12 @@ private void clearCache(InvalidateApiKeyResponse result, ActionListener listener) { - executeClearCacheRequest(result, listener, new ClearSecurityCacheRequest().cacheName("api_key_doc").keys(apiKeyId)); + private void clearApiKeyDocCache(final BulkUpdateApiKeyResponse result, final ActionListener listener) { + executeClearCacheRequest( + result, + listener, + new ClearSecurityCacheRequest().cacheName("api_key_doc").keys(result.getUpdated().toArray(String[]::new)) + ); } private void executeClearCacheRequest(T result, ActionListener listener, ClearSecurityCacheRequest clearApiKeyCacheRequest) { @@ -1631,13 +1679,13 @@ private static VersionedApiKeyDoc convertSearchHitToVersionedApiKeyDoc(SearchHit try ( XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, hit.getSourceRef(), XContentType.JSON) ) { - return new VersionedApiKeyDoc(ApiKeyDoc.fromXContent(parser), hit.getSeqNo(), hit.getPrimaryTerm()); + return new VersionedApiKeyDoc(ApiKeyDoc.fromXContent(parser), hit.getId(), hit.getSeqNo(), hit.getPrimaryTerm()); } catch (IOException ex) { throw new UncheckedIOException(ex); } } - private record VersionedApiKeyDoc(ApiKeyDoc doc, long seqNo, long primaryTerm) {} + private record VersionedApiKeyDoc(ApiKeyDoc doc, String id, long seqNo, long primaryTerm) {} private RemovalListener> getAuthCacheRemovalListener(int maximumWeight) { return notification -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java new file mode 100644 index 0000000000000..7aa0dc05e5418 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.rest.action.apikey; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public final class RestBulkUpdateApiKeyAction extends ApiKeyBaseRestHandler { + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "bulk_update_api_key_request", + a -> new BulkUpdateApiKeyRequest((List) a[0], (List) a[1], (Map) a[2]) + ); + + static { + PARSER.declareStringArray(constructorArg(), new ParseField("ids")); + PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + p.nextToken(); + return RoleDescriptor.parse(n, p, false); + }, new ParseField("role_descriptors")); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + } + + public RestBulkUpdateApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + public List routes() { + return List.of(new Route(POST, "/_security/api_key/_bulk_update")); + } + + @Override + public String getName() { + return "xpack_security_bulk_update_api_key"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + final BulkUpdateApiKeyRequest parsed = PARSER.parse(parser, null); + return channel -> client.execute(BulkUpdateApiKeyAction.INSTANCE, parsed, new RestToXContentListener<>(channel)); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java index 6da61510ed8a1..c04a11b4bae69 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java @@ -167,6 +167,17 @@ public void invalidateApiKeysForUser(String username) throws IOException { execute(request); } + /** + * Uses the REST API to invalidate API Keys given their IDs. + * @see org.elasticsearch.xpack.security.rest.action.apikey.RestInvalidateApiKeyAction + */ + public void invalidateApiKeys(final String... 
apiKeyIds) throws IOException { + final var endpoint = "/_security/api_key/"; + final var request = new Request(HttpDelete.METHOD_NAME, endpoint); + request.setJsonEntity(XContentTestUtils.convertToXContent(Map.of("ids", apiKeyIds), XContentType.JSON).utf8ToString()); + execute(request); + } + /** * Uses the REST API to get a Role descriptor * @see org.elasticsearch.xpack.security.rest.action.role.RestGetRolesAction diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index ae4fa08bc0806..9a1014a96c1aa 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -65,6 +65,8 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; @@ -544,6 +546,23 @@ public void testMixingValidAndInvalidCredentials() throws Exception { } } + public void testBulkUpdateWithApiKeyCredentialNotSupported() { + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); + final ApiKeyService service = createApiKeyService(settings); + + final PlainActionFuture listener = new PlainActionFuture<>(); + service.updateApiKeys( + AuthenticationTestHelper.builder().apiKey().build(false), + BulkUpdateApiKeyRequest.usingApiKeyIds("id"), + Set.of(), + listener + ); + + final var ex = expectThrows(ExecutionException.class, listener::get); + assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(ex.getMessage(), containsString("authentication via API key not supported: only the owner user can update an API key")); + } + private Map mockKeyDocument( ApiKeyService service, String id, @@ -1660,17 +1679,11 @@ public void testValidateApiKeyDocBeforeUpdate() throws IOException { new Authentication.RealmRef("realm1", "realm_type1", "node") ); - var ex = expectThrows( - IllegalArgumentException.class, - () -> apiKeyService.validateCurrentApiKeyDocForUpdate(apiKeyId, auth, apiKeyDocWithNullName) - ); + var ex = expectThrows(IllegalArgumentException.class, () -> apiKeyService.validateForUpdate(apiKeyId, auth, apiKeyDocWithNullName)); assertThat(ex.getMessage(), containsString("cannot update legacy API key [" + apiKeyId + "] without name")); final var apiKeyDocWithEmptyName = buildApiKeyDoc(hash, -1, false, "", Version.V_8_2_0.id); - ex = expectThrows( - IllegalArgumentException.class, - () -> apiKeyService.validateCurrentApiKeyDocForUpdate(apiKeyId, auth, apiKeyDocWithEmptyName) - ); + ex = expectThrows(IllegalArgumentException.class, () -> apiKeyService.validateForUpdate(apiKeyId, auth, apiKeyDocWithEmptyName)); assertThat(ex.getMessage(), containsString("cannot update legacy API key [" + apiKeyId + "] without name")); } @@ -1737,6 +1750,7 @@ public void testMaybeBuildUpdatedDocument() throws 
IOException { final var service = createApiKeyService(); final XContentBuilder builder = service.maybeBuildUpdatedDocument( + request.getId(), oldApiKeyDoc, newVersion, newAuthentication, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java index 750482f760234..eb0b7bea1a5fb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyActionTests.java @@ -53,6 +53,9 @@ public void testAbsentRoleDescriptorsAndMetadataSetToNull() { dispatchRequest(builder.build()); - assertEquals(new UpdateApiKeyRequest(apiKeyId, null, null), requestHolder.get()); + final UpdateApiKeyRequest request = requestHolder.get(); + assertEquals(apiKeyId, request.getId()); + assertNull(request.getRoleDescriptors()); + assertNull(request.getMetadata()); } } From 6d1b2277b35d20e9821e637fccae91dce14b4938 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 1 Aug 2022 08:05:22 -0400 Subject: [PATCH 025/265] [ML] add new text_similarity nlp task (#88439) text_similarity is a cross-encoding task that compares two text inputs at inference time. It can be used for cross-encoding re-ranking ``` POST _ml/trained_models/cross-encoder__ms-marco-tinybert-l-2-v2/_infer { "docs":[{ "text_field": "Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers."}, {"text_field": "New York City is famous for the Metropolitan Museum of Art."}], "inference_config": { "text_similarity": { "text": "How many people live in Berlin?" } } } ``` With results: ``` { "inference_results": [ { "predicted_value": 7.235751628875732 }, { "predicted_value": -11.562295913696289 } ] } ``` Or with just raw text similarity. Here is an example for check if two questions are very similar: ``` POST _ml/trained_models/cross-encoder__quora-distilroberta-base/_infer { "docs":[{ "text_field": "what is your quest?"}, { "text_field": "what is your favorite color?"}, { "text_field": "is the swallow african or european?"}, { "text_field": "what is the airspeed velocity of a swallow carrying coconuts?"}, { "text_field": "how fast is an unladen swallow?"}], "inference_config": { "text_similarity": { "text": "what is the airspeed velocity of an unladen swallow?" 
} } } ``` With results: ``` { "inference_results": [ { "predicted_value": -8.312414169311523 }, { "predicted_value": -8.239330291748047 }, { "predicted_value": -8.256011009216309 }, { "predicted_value": -4.1945390701293945 }, { "predicted_value": -3.294121742248535 } ] } ``` --- docs/changelog/88439.yaml | 5 + .../MlInferenceNamedXContentProvider.java | 35 +++ .../TextSimilarityInferenceResults.java | 81 +++++++ .../trainedmodel/TextSimilarityConfig.java | 220 ++++++++++++++++++ .../TextSimilarityConfigUpdate.java | 216 +++++++++++++++++ .../ml/inference/TrainedModelConfigTests.java | 6 +- .../TextSimilarityInferenceResultsTests.java | 35 +++ .../TextSimilarityConfigTests.java | 71 ++++++ .../TextSimilarityConfigUpdateTests.java | 182 +++++++++++++++ .../ml/inference/nlp/FillMaskProcessor.java | 2 +- .../inference/nlp/PassThroughProcessor.java | 3 +- .../nlp/QuestionAnsweringProcessor.java | 2 +- .../xpack/ml/inference/nlp/TaskType.java | 18 +- .../inference/nlp/TextEmbeddingProcessor.java | 3 +- .../nlp/TextSimilarityProcessor.java | 149 ++++++++++++ .../nlp/ZeroShotClassificationProcessor.java | 5 +- .../inference/nlp/FillMaskProcessorTests.java | 8 +- .../nlp/QuestionAnsweringProcessorTests.java | 2 +- .../nlp/TextSimilarityProcessorTests.java | 100 ++++++++ 19 files changed, 1117 insertions(+), 26 deletions(-) create mode 100644 docs/changelog/88439.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java diff --git a/docs/changelog/88439.yaml b/docs/changelog/88439.yaml new file mode 100644 index 0000000000000..b5cba4eb4c2b0 --- /dev/null +++ b/docs/changelog/88439.yaml @@ -0,0 +1,5 @@ +pr: 88439 +summary: Add new `text_similarity` nlp task +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlInferenceNamedXContentProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlInferenceNamedXContentProvider.java index 107d717498a6a..bc0ae34aed0cd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlInferenceNamedXContentProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlInferenceNamedXContentProvider.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.ml.inference.results.QuestionAnsweringInferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.RegressionInferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.TextEmbeddingResults; 
+import org.elasticsearch.xpack.core.ml.inference.results.TextSimilarityInferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.BertTokenization; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.BertTokenizationUpdate; @@ -62,6 +63,8 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.Tokenization; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TrainedModel; @@ -382,6 +385,20 @@ public List getNamedXContentParsers() { QuestionAnsweringConfig::fromXContentLenient ) ); + namedXContent.add( + new NamedXContentRegistry.Entry( + StrictlyParsedInferenceConfig.class, + new ParseField(TextSimilarityConfig.NAME), + TextSimilarityConfig::fromXContentStrict + ) + ); + namedXContent.add( + new NamedXContentRegistry.Entry( + LenientlyParsedInferenceConfig.class, + new ParseField(TextSimilarityConfig.NAME), + TextSimilarityConfig::fromXContentLenient + ) + ); // Inference Configs Update namedXContent.add( @@ -447,6 +464,13 @@ public List getNamedXContentParsers() { QuestionAnsweringConfigUpdate::fromXContentStrict ) ); + namedXContent.add( + new NamedXContentRegistry.Entry( + InferenceConfigUpdate.class, + new ParseField(TextSimilarityConfigUpdate.NAME), + TextSimilarityConfigUpdate::fromXContentStrict + ) + ); // Inference models namedXContent.add(new NamedXContentRegistry.Entry(InferenceModel.class, Ensemble.NAME, EnsembleInferenceModel::fromXContent)); @@ -579,6 +603,13 @@ public List getNamedWriteables() { QuestionAnsweringInferenceResults::new ) ); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + InferenceResults.class, + TextSimilarityInferenceResults.NAME, + TextSimilarityInferenceResults::new + ) + ); // Inference Configs namedWriteables.add( new NamedWriteableRegistry.Entry(InferenceConfig.class, ClassificationConfig.NAME.getPreferredName(), ClassificationConfig::new) @@ -599,6 +630,7 @@ public List getNamedWriteables() { namedWriteables.add( new NamedWriteableRegistry.Entry(InferenceConfig.class, QuestionAnsweringConfig.NAME, QuestionAnsweringConfig::new) ); + namedWriteables.add(new NamedWriteableRegistry.Entry(InferenceConfig.class, TextSimilarityConfig.NAME, TextSimilarityConfig::new)); // Inference Configs Updates namedWriteables.add( @@ -650,6 +682,9 @@ public List getNamedWriteables() { QuestionAnsweringConfigUpdate::new ) ); + namedWriteables.add( + new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, TextSimilarityConfigUpdate.NAME, TextSimilarityConfigUpdate::new) + ); // Location namedWriteables.add( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java new file mode 100644 index 0000000000000..6848d47f187bd --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ml.inference.results; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class TextSimilarityInferenceResults extends NlpInferenceResults { + + public static final String NAME = "text_similarity"; + + private final String resultsField; + private final double score; + + public TextSimilarityInferenceResults(String resultsField, double score, boolean isTruncated) { + super(isTruncated); + this.resultsField = resultsField; + this.score = score; + } + + public TextSimilarityInferenceResults(StreamInput in) throws IOException { + super(in); + this.resultsField = in.readString(); + this.score = in.readDouble(); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeString(resultsField); + out.writeDouble(score); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + TextSimilarityInferenceResults that = (TextSimilarityInferenceResults) o; + return Objects.equals(resultsField, that.resultsField) && Objects.equals(score, that.score); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), resultsField, score); + } + + @Override + public String getResultsField() { + return resultsField; + } + + @Override + public Double predictedValue() { + return score; + } + + @Override + void addMapFields(Map map) { + map.put(resultsField, score); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(resultsField, score); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java new file mode 100644 index 0000000000000..c1eae964898be --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; +import java.util.Optional; + +/** + * Text similarity configuration for running a cross-encoder transformer model with the given text and some number of documents + * containing another text field. + */ +public class TextSimilarityConfig implements NlpConfig { + + public static final String NAME = "text_similarity"; + public static final ParseField TEXT = new ParseField("text"); + public static final ParseField SPAN_SCORE_COMBINATION_FUNCTION = new ParseField("span_score_combination_function"); + + public static TextSimilarityConfig fromXContentStrict(XContentParser parser) { + return STRICT_PARSER.apply(parser, null); + } + + public static TextSimilarityConfig fromXContentLenient(XContentParser parser) { + return LENIENT_PARSER.apply(parser, null); + } + + private static final ConstructingObjectParser STRICT_PARSER = createParser(false); + private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>( + NAME, + ignoreUnknownFields, + a -> new TextSimilarityConfig((VocabularyConfig) a[0], (Tokenization) a[1], (String) a[2], (String) a[3]) + ); + parser.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + if (ignoreUnknownFields == false) { + throw ExceptionsHelper.badRequestException( + "illegal setting [{}] on inference model creation", + VOCABULARY.getPreferredName() + ); + } + return VocabularyConfig.fromXContentLenient(p); + }, VOCABULARY); + parser.declareNamedObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c, n) -> p.namedObject(Tokenization.class, n, ignoreUnknownFields), + TOKENIZATION + ); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), RESULTS_FIELD); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), SPAN_SCORE_COMBINATION_FUNCTION); + return parser; + } + + private final VocabularyConfig vocabularyConfig; + private final Tokenization tokenization; + private final String resultsField; + private final String text; + private final SpanScoreFunction spanScoreFunction; + + TextSimilarityConfig( + @Nullable VocabularyConfig vocabularyConfig, + @Nullable Tokenization tokenization, + @Nullable String resultsField, + @Nullable String spanScoreFunction + ) { + this.vocabularyConfig = Optional.ofNullable(vocabularyConfig) + .orElse(new VocabularyConfig(InferenceIndexConstants.nativeDefinitionStore())); + this.tokenization = tokenization == null ? 
Tokenization.createDefault() : tokenization; + this.resultsField = resultsField; + this.text = null; + this.spanScoreFunction = Optional.ofNullable(spanScoreFunction).map(SpanScoreFunction::fromString).orElse(SpanScoreFunction.MAX); + } + + public TextSimilarityConfig( + String text, + VocabularyConfig vocabularyConfig, + Tokenization tokenization, + String resultsField, + SpanScoreFunction spanScoreFunction + ) { + this.text = ExceptionsHelper.requireNonNull(text, TEXT); + this.vocabularyConfig = ExceptionsHelper.requireNonNull(vocabularyConfig, VOCABULARY); + this.tokenization = ExceptionsHelper.requireNonNull(tokenization, TOKENIZATION); + this.resultsField = resultsField; + this.spanScoreFunction = spanScoreFunction; + } + + public TextSimilarityConfig(StreamInput in) throws IOException { + vocabularyConfig = new VocabularyConfig(in); + tokenization = in.readNamedWriteable(Tokenization.class); + resultsField = in.readOptionalString(); + text = in.readOptionalString(); + spanScoreFunction = in.readEnum(SpanScoreFunction.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + vocabularyConfig.writeTo(out); + out.writeNamedWriteable(tokenization); + out.writeOptionalString(resultsField); + out.writeOptionalString(text); + out.writeEnum(spanScoreFunction); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(VOCABULARY.getPreferredName(), vocabularyConfig, params); + NamedXContentObjectHelper.writeNamedObject(builder, params, TOKENIZATION.getPreferredName(), tokenization); + if (resultsField != null) { + builder.field(RESULTS_FIELD.getPreferredName(), resultsField); + } + if (text != null) { + builder.field(TEXT.getPreferredName(), text); + } + builder.field(SPAN_SCORE_COMBINATION_FUNCTION.getPreferredName(), spanScoreFunction.toString()); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean isTargetTypeSupported(TargetType targetType) { + return false; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_8_5_0; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + + TextSimilarityConfig that = (TextSimilarityConfig) o; + return Objects.equals(vocabularyConfig, that.vocabularyConfig) + && Objects.equals(tokenization, that.tokenization) + && Objects.equals(text, that.text) + && Objects.equals(spanScoreFunction, that.spanScoreFunction) + && Objects.equals(resultsField, that.resultsField); + } + + @Override + public int hashCode() { + return Objects.hash(vocabularyConfig, tokenization, resultsField, text, spanScoreFunction); + } + + @Override + public VocabularyConfig getVocabularyConfig() { + return vocabularyConfig; + } + + @Override + public Tokenization getTokenization() { + return tokenization; + } + + public String getText() { + return text; + } + + public SpanScoreFunction getSpanScoreFunction() { + return spanScoreFunction; + } + + @Override + public String getResultsField() { + return resultsField; + } + + @Override + public boolean isAllocateOnly() { + return true; + } + + public enum SpanScoreFunction { + MAX, + MEAN; + + public static SpanScoreFunction fromString(String value) { + return valueOf(value.toUpperCase(Locale.ROOT)); + } + + @Override + public String toString() { 
+ return name().toLowerCase(Locale.ROOT); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java new file mode 100644 index 0000000000000..48ee53c62d49c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig.RESULTS_FIELD; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig.TOKENIZATION; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig.SPAN_SCORE_COMBINATION_FUNCTION; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig.TEXT; + +public class TextSimilarityConfigUpdate extends NlpConfigUpdate implements NamedXContentObject { + + public static final String NAME = "text_similarity"; + + public static TextSimilarityConfigUpdate fromXContentStrict(XContentParser parser) { + return STRICT_PARSER.apply(parser, null).build(); + } + + public static TextSimilarityConfigUpdate fromMap(Map map) { + Map options = new HashMap<>(map); + String resultsField = (String) options.remove(RESULTS_FIELD.getPreferredName()); + String text = (String) options.remove(TEXT.getPreferredName()); + String spanScoreFunction = (String) options.remove(SPAN_SCORE_COMBINATION_FUNCTION.getPreferredName()); + TokenizationUpdate tokenizationUpdate = NlpConfigUpdate.tokenizationFromMap(options); + if (options.isEmpty() == false) { + throw ExceptionsHelper.badRequestException("Unrecognized fields {}.", map.keySet()); + } + return new TextSimilarityConfigUpdate(text, resultsField, tokenizationUpdate, spanScoreFunction); + } + + private static final ObjectParser STRICT_PARSER = new ObjectParser<>( + NAME, + TextSimilarityConfigUpdate.Builder::new + ); + + static { + STRICT_PARSER.declareString(Builder::setText, TEXT); + STRICT_PARSER.declareString(Builder::setResultsField, RESULTS_FIELD); + STRICT_PARSER.declareString(Builder::setSpanScoreFunction, SPAN_SCORE_COMBINATION_FUNCTION); + STRICT_PARSER.declareNamedObject( + Builder::setTokenizationUpdate, + (p, c, n) -> p.namedObject(TokenizationUpdate.class, n, false), + TOKENIZATION + ); + } + + private final String text; + private final String resultsField; + private final TextSimilarityConfig.SpanScoreFunction spanScoreFunction; + + public TextSimilarityConfigUpdate( + 
String text, + @Nullable String resultsField, + @Nullable TokenizationUpdate tokenizationUpdate, + @Nullable String spanScoreFunction + ) { + super(tokenizationUpdate); + this.text = ExceptionsHelper.requireNonNull(text, TEXT); + this.resultsField = resultsField; + this.spanScoreFunction = Optional.ofNullable(spanScoreFunction) + .map(TextSimilarityConfig.SpanScoreFunction::fromString) + .orElse(null); + } + + public TextSimilarityConfigUpdate(StreamInput in) throws IOException { + super(in); + text = in.readString(); + resultsField = in.readOptionalString(); + spanScoreFunction = in.readOptionalEnum(TextSimilarityConfig.SpanScoreFunction.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(text); + out.writeOptionalString(resultsField); + out.writeOptionalEnum(spanScoreFunction); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + if (resultsField != null) { + builder.field(RESULTS_FIELD.getPreferredName(), resultsField); + } + if (spanScoreFunction != null) { + builder.field(SPAN_SCORE_COMBINATION_FUNCTION.getPreferredName(), spanScoreFunction); + } + builder.field(TEXT.getPreferredName(), text); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public InferenceConfig apply(InferenceConfig originalConfig) { + if (originalConfig instanceof TextSimilarityConfig == false) { + throw ExceptionsHelper.badRequestException( + "Inference config of type [{}] can not be updated with a inference request of type [{}]", + originalConfig.getName(), + getName() + ); + } + + TextSimilarityConfig textSimilarityConfig = (TextSimilarityConfig) originalConfig; + return new TextSimilarityConfig( + text, + textSimilarityConfig.getVocabularyConfig(), + tokenizationUpdate == null + ? textSimilarityConfig.getTokenization() + : tokenizationUpdate.apply(textSimilarityConfig.getTokenization()), + Optional.ofNullable(resultsField).orElse(textSimilarityConfig.getResultsField()), + Optional.ofNullable(spanScoreFunction).orElse(textSimilarityConfig.getSpanScoreFunction()) + ); + } + + @Override + public boolean isSupported(InferenceConfig config) { + return config instanceof TextSimilarityConfig; + } + + @Override + public String getResultsField() { + return resultsField; + } + + @Override + public InferenceConfigUpdate.Builder, ? 
extends InferenceConfigUpdate> newBuilder() { + return new Builder().setText(text).setResultsField(resultsField).setTokenizationUpdate(tokenizationUpdate); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + + TextSimilarityConfigUpdate that = (TextSimilarityConfigUpdate) o; + return Objects.equals(text, that.text) + && Objects.equals(resultsField, that.resultsField) + && Objects.equals(tokenizationUpdate, that.tokenizationUpdate); + } + + @Override + public int hashCode() { + return Objects.hash(resultsField, tokenizationUpdate, text); + } + + public String getText() { + return text; + } + + public static class Builder implements InferenceConfigUpdate.Builder { + private String resultsField; + private String spanScoreFunction; + private TokenizationUpdate tokenizationUpdate; + private String text; + + @Override + public TextSimilarityConfigUpdate.Builder setResultsField(String resultsField) { + this.resultsField = resultsField; + return this; + } + + public Builder setTokenizationUpdate(TokenizationUpdate tokenizationUpdate) { + this.tokenizationUpdate = tokenizationUpdate; + return this; + } + + public Builder setText(String text) { + this.text = text; + return this; + } + + public Builder setSpanScoreFunction(String spanScoreFunction) { + this.spanScoreFunction = spanScoreFunction; + return this; + } + + @Override + public TextSimilarityConfigUpdate build() { + return new TextSimilarityConfigUpdate(text, resultsField, tokenizationUpdate, spanScoreFunction); + } + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_8_5_0; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java index 8f4bc321fa1fb..934173b02d426 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java @@ -29,10 +29,12 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.QuestionAnsweringConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfigTests; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.MlStrings; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; @@ -76,7 +78,9 @@ public static TrainedModelConfig.Builder createTestInstance(String modelId, bool PassThroughConfigTests.createRandom(), TextClassificationConfigTests.createRandom(), FillMaskConfigTests.createRandom(), - TextEmbeddingConfigTests.createRandom() } + TextEmbeddingConfigTests.createRandom(), + 
QuestionAnsweringConfigTests.createRandom(), + TextSimilarityConfigTests.createRandom() } : new InferenceConfig[] { ClassificationConfigTests.randomClassificationConfig(), RegressionConfigTests.randomRegressionConfig() }; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java new file mode 100644 index 0000000000000..ac80970ae13cb --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ml.inference.results; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.ingest.IngestDocument; + +import static org.hamcrest.Matchers.equalTo; + +public class TextSimilarityInferenceResultsTests extends InferenceResultsTestCase { + + public static TextSimilarityInferenceResults createRandomResults() { + return new TextSimilarityInferenceResults(randomAlphaOfLength(10), randomDoubleBetween(0.0, 1.0, false), randomBoolean()); + } + + @Override + protected TextSimilarityInferenceResults createTestInstance() { + return createRandomResults(); + } + + @Override + protected Writeable.Reader instanceReader() { + return TextSimilarityInferenceResults::new; + } + + @Override + void assertFieldValues(TextSimilarityInferenceResults createdInstance, IngestDocument document, String resultsField) { + String path = resultsField + "." + createdInstance.getResultsField(); + assertThat(document.getFieldValue(path, Double.class), equalTo(createdInstance.predictedValue())); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java new file mode 100644 index 0000000000000..3d2f4b21972f0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.InferenceConfigItemTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.function.Predicate; + +public class TextSimilarityConfigTests extends InferenceConfigItemTestCase { + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> field.isEmpty() == false; + } + + @Override + protected TextSimilarityConfig doParseInstance(XContentParser parser) throws IOException { + return TextSimilarityConfig.fromXContentLenient(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return TextSimilarityConfig::new; + } + + @Override + protected TextSimilarityConfig createTestInstance() { + return createRandom(); + } + + @Override + protected TextSimilarityConfig mutateInstanceForVersion(TextSimilarityConfig instance, Version version) { + return instance; + } + + public static TextSimilarityConfig createRandom() { + return new TextSimilarityConfig( + randomBoolean() ? null : VocabularyConfigTests.createRandom(), + randomBoolean() + ? null + : randomFrom( + BertTokenizationTests.createRandomWithSpan(), + MPNetTokenizationTests.createRandomWithSpan(), + RobertaTokenizationTests.createRandomWithSpan() + ), + randomBoolean() ? null : randomAlphaOfLength(7), + randomBoolean() + ? null + : randomFrom( + Arrays.stream(TextSimilarityConfig.SpanScoreFunction.values()) + .map(TextSimilarityConfig.SpanScoreFunction::toString) + .toArray(String[]::new) + ) + ); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java new file mode 100644 index 0000000000000..5c8d21ae99fe2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java @@ -0,0 +1,182 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigTestScaffolding.cloneWithNewTruncation; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigTestScaffolding.createTokenizationUpdate; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig.TEXT; +import static org.hamcrest.Matchers.equalTo; + +public class TextSimilarityConfigUpdateTests extends AbstractNlpConfigUpdateTestCase { + + public static TextSimilarityConfigUpdate randomUpdate() { + return new TextSimilarityConfigUpdate( + randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : new BertTokenizationUpdate(randomFrom(Tokenization.Truncate.values()), null), + randomBoolean() + ? null + : randomFrom( + Arrays.stream(TextSimilarityConfig.SpanScoreFunction.values()) + .map(TextSimilarityConfig.SpanScoreFunction::toString) + .toArray(String[]::new) + ) + ); + } + + public static TextSimilarityConfigUpdate mutateForVersion(TextSimilarityConfigUpdate instance, Version version) { + if (version.before(Version.V_8_1_0)) { + return new TextSimilarityConfigUpdate(instance.getText(), instance.getResultsField(), null, null); + } + return instance; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected TextSimilarityConfigUpdate doParseInstance(XContentParser parser) throws IOException { + return TextSimilarityConfigUpdate.fromXContentStrict(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return TextSimilarityConfigUpdate::new; + } + + @Override + protected TextSimilarityConfigUpdate createTestInstance() { + return createRandom(); + } + + @Override + protected TextSimilarityConfigUpdate mutateInstanceForVersion(TextSimilarityConfigUpdate instance, Version version) { + return mutateForVersion(instance, version); + } + + @Override + Tuple, TextSimilarityConfigUpdate> fromMapTestInstances(TokenizationUpdate expectedTokenization) { + String func = randomFrom( + Arrays.stream(TextSimilarityConfig.SpanScoreFunction.values()) + .map(TextSimilarityConfig.SpanScoreFunction::toString) + .toArray(String[]::new) + ); + TextSimilarityConfigUpdate expected = new TextSimilarityConfigUpdate( + "What is the meaning of life?", + "ml-results", + expectedTokenization, + func + ); + + Map config = new HashMap<>() { + { + put(TEXT.getPreferredName(), "What is the meaning of life?"); + put(TextSimilarityConfig.RESULTS_FIELD.getPreferredName(), "ml-results"); + put(TextSimilarityConfig.SPAN_SCORE_COMBINATION_FUNCTION.getPreferredName(), func); + } + }; + return Tuple.tuple(config, expected); + } + + @Override + TextSimilarityConfigUpdate fromMap(Map map) { + return TextSimilarityConfigUpdate.fromMap(map); + } + + public void testApply() { + Tokenization tokenizationConfig = randomFrom( + BertTokenizationTests.createRandom(), + MPNetTokenizationTests.createRandom(), + 
RobertaTokenizationTests.createRandom() + ); + TextSimilarityConfig originalConfig = new TextSimilarityConfig( + randomBoolean() ? null : VocabularyConfigTests.createRandom(), + tokenizationConfig, + randomBoolean() ? null : randomAlphaOfLength(8), + randomBoolean() + ? null + : randomFrom( + Arrays.stream(TextSimilarityConfig.SpanScoreFunction.values()) + .map(TextSimilarityConfig.SpanScoreFunction::toString) + .toArray(String[]::new) + ) + ); + assertThat( + new TextSimilarityConfig( + "Are you my mother?", + originalConfig.getVocabularyConfig(), + originalConfig.getTokenization(), + originalConfig.getResultsField(), + originalConfig.getSpanScoreFunction() + ), + equalTo(new TextSimilarityConfigUpdate.Builder().setText("Are you my mother?").build().apply(originalConfig)) + ); + assertThat( + new TextSimilarityConfig( + "Are you my mother?", + originalConfig.getVocabularyConfig(), + originalConfig.getTokenization(), + "updated-field", + originalConfig.getSpanScoreFunction() + ), + equalTo( + new TextSimilarityConfigUpdate.Builder().setText("Are you my mother?") + .setResultsField("updated-field") + .build() + .apply(originalConfig) + ) + ); + + Tokenization.Truncate truncate = randomFrom(Tokenization.Truncate.values()); + Tokenization tokenization = cloneWithNewTruncation(originalConfig.getTokenization(), truncate); + assertThat( + new TextSimilarityConfig( + "Are you my mother?", + originalConfig.getVocabularyConfig(), + tokenization, + originalConfig.getResultsField(), + originalConfig.getSpanScoreFunction() + ), + equalTo( + new TextSimilarityConfigUpdate.Builder().setText("Are you my mother?") + .setTokenizationUpdate(createTokenizationUpdate(originalConfig.getTokenization(), truncate, null)) + .build() + .apply(originalConfig) + ) + ); + } + + public static TextSimilarityConfigUpdate createRandom() { + return randomUpdate(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(new MlInferenceNamedXContentProvider().getNamedWriteables()); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessor.java index a866d691cb7d1..d8f86eef2c4d0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessor.java @@ -29,7 +29,7 @@ public class FillMaskProcessor extends NlpTask.Processor { - FillMaskProcessor(NlpTokenizer tokenizer, FillMaskConfig config) { + FillMaskProcessor(NlpTokenizer tokenizer) { super(tokenizer); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/PassThroughProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/PassThroughProcessor.java index f4859405d35b9..03f1084e39184 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/PassThroughProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/PassThroughProcessor.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.PyTorchPassThroughResults; import 
org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfig; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.NlpTokenizer; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizationResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; @@ -28,7 +27,7 @@ public class PassThroughProcessor extends NlpTask.Processor { private final NlpTask.RequestBuilder requestBuilder; - PassThroughProcessor(NlpTokenizer tokenizer, PassThroughConfig config) { + PassThroughProcessor(NlpTokenizer tokenizer) { super(tokenizer); this.requestBuilder = tokenizer.requestBuilder(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessor.java index 33b08d13c49b2..0014360fb61ff 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessor.java @@ -31,7 +31,7 @@ public class QuestionAnsweringProcessor extends NlpTask.Processor { - QuestionAnsweringProcessor(NlpTokenizer tokenizer, QuestionAnsweringConfig config) { + QuestionAnsweringProcessor(NlpTokenizer tokenizer) { super(tokenizer); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TaskType.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TaskType.java index 32c0ded38b34c..a1fca461a7381 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TaskType.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TaskType.java @@ -7,13 +7,9 @@ package org.elasticsearch.xpack.ml.inference.nlp; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.FillMaskConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.QuestionAnsweringConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ZeroShotClassificationConfig; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.NlpTokenizer; @@ -36,19 +32,19 @@ public NlpTask.Processor createProcessor(NlpTokenizer tokenizer, NlpConfig confi FILL_MASK { @Override public NlpTask.Processor createProcessor(NlpTokenizer tokenizer, NlpConfig config) { - return new FillMaskProcessor(tokenizer, (FillMaskConfig) config); + return new FillMaskProcessor(tokenizer); } }, PASS_THROUGH { @Override public NlpTask.Processor createProcessor(NlpTokenizer tokenizer, NlpConfig config) { - return new PassThroughProcessor(tokenizer, (PassThroughConfig) config); + return new PassThroughProcessor(tokenizer); } }, TEXT_EMBEDDING { @Override public NlpTask.Processor createProcessor(NlpTokenizer tokenizer, NlpConfig config) { - return new TextEmbeddingProcessor(tokenizer, (TextEmbeddingConfig) config); + return new TextEmbeddingProcessor(tokenizer); } }, ZERO_SHOT_CLASSIFICATION { @@ -60,7 +56,13 @@ public NlpTask.Processor 
createProcessor(NlpTokenizer tokenizer, NlpConfig confi QUESTION_ANSWERING { @Override public NlpTask.Processor createProcessor(NlpTokenizer tokenizer, NlpConfig config) { - return new QuestionAnsweringProcessor(tokenizer, (QuestionAnsweringConfig) config); + return new QuestionAnsweringProcessor(tokenizer); + } + }, + TEXT_SIMILARITY { + @Override + public NlpTask.Processor createProcessor(NlpTokenizer tokenizer, NlpConfig config) { + return new TextSimilarityProcessor(tokenizer); } }; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java index 0671235176ad2..c11ea2005a05d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.TextEmbeddingResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfig; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.NlpTokenizer; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizationResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; @@ -27,7 +26,7 @@ public class TextEmbeddingProcessor extends NlpTask.Processor { private final NlpTask.RequestBuilder requestBuilder; - TextEmbeddingProcessor(NlpTokenizer tokenizer, TextEmbeddingConfig config) { + TextEmbeddingProcessor(NlpTokenizer tokenizer) { super(tokenizer); this.requestBuilder = tokenizer.requestBuilder(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java new file mode 100644 index 0000000000000..1f296438c796c --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.nlp; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; +import org.elasticsearch.xpack.core.ml.inference.results.TextSimilarityInferenceResults; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.Tokenization; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.NlpTokenizer; +import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizationResult; +import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; + +import java.io.IOException; +import java.util.List; +import java.util.Optional; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; + +public class TextSimilarityProcessor extends NlpTask.Processor { + + TextSimilarityProcessor(NlpTokenizer tokenizer) { + super(tokenizer); + } + + @Override + public void validateInputs(List inputs) { + // nothing to validate + } + + @Override + public NlpTask.RequestBuilder getRequestBuilder(NlpConfig nlpConfig) { + if (nlpConfig instanceof TextSimilarityConfig textSimilarityConfig) { + return new RequestBuilder(tokenizer, textSimilarityConfig.getText()); + } + throw ExceptionsHelper.badRequestException( + "please provide configuration update for text_similarity task including the desired [text]" + ); + } + + @Override + public NlpTask.ResultProcessor getResultProcessor(NlpConfig nlpConfig) { + if (nlpConfig instanceof TextSimilarityConfig textSimilarityConfig) { + return new ResultProcessor( + textSimilarityConfig.getText(), + textSimilarityConfig.getResultsField(), + textSimilarityConfig.getSpanScoreFunction() + ); + } + throw ExceptionsHelper.badRequestException( + "please provide configuration update for text_similarity task including the desired [text]" + ); + } + + record RequestBuilder(NlpTokenizer tokenizer, String sequence) implements NlpTask.RequestBuilder { + + @Override + public NlpTask.Request buildRequest(List inputs, String requestId, Tokenization.Truncate truncate, int span) + throws IOException { + if (inputs.size() > 1) { + throw ExceptionsHelper.badRequestException("Unable to do text_similarity on more than one text input at a time"); + } + String context = inputs.get(0); + List tokenizations = tokenizer.tokenize(sequence, context, truncate, span, 0); + TokenizationResult result = tokenizer.buildTokenizationResult(tokenizations); + return result.buildRequest(requestId, truncate); + } + } + + record ResultProcessor(String question, String resultsField, TextSimilarityConfig.SpanScoreFunction function) + implements + NlpTask.ResultProcessor { + + @Override + public InferenceResults processResult(TokenizationResult tokenization, PyTorchInferenceResult pyTorchResult) { + if (pyTorchResult.getInferenceResult().length < 1) { + throw new ElasticsearchStatusException("text_similarity result has no data", RestStatus.INTERNAL_SERVER_ERROR); + } + SpanScoreFunction spanScoreFunction = fromConfig(function); + for (int i = 0; i < pyTorchResult.getInferenceResult()[0].length; i++) { + double[] result = pyTorchResult.getInferenceResult()[0][i]; + if (result.length != 1) { + throw new ElasticsearchStatusException( + "Expected exactly [1] value in 
text_similarity result; got [{}]", + RestStatus.INTERNAL_SERVER_ERROR, + result.length + ); + } + spanScoreFunction.accept(result[0]); + } + return new TextSimilarityInferenceResults( + Optional.ofNullable(resultsField).orElse(DEFAULT_RESULTS_FIELD), + spanScoreFunction.score(), + tokenization.anyTruncated() + ); + } + } + + static SpanScoreFunction fromConfig(TextSimilarityConfig.SpanScoreFunction spanScoreFunction) { + return switch (spanScoreFunction) { + case MAX -> new Max(); + case MEAN -> new Mean(); + }; + } + + private interface SpanScoreFunction { + void accept(double v); + + double score(); + } + + private static class Max implements SpanScoreFunction { + private double score = Double.NEGATIVE_INFINITY; + + @Override + public void accept(double v) { + score = Math.max(score, v); + } + + @Override + public double score() { + return score; + } + } + + private static class Mean implements SpanScoreFunction { + private double score = 0.0; + private int count = 0; + + @Override + public void accept(double v) { + score += v; + count++; + } + + @Override + public double score() { + return score / count; + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java index e932df01604ad..e19529b705d77 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java @@ -44,10 +44,7 @@ public class ZeroShotClassificationProcessor extends NlpTask.Processor { ZeroShotClassificationProcessor(NlpTokenizer tokenizer, ZeroShotClassificationConfig config) { super(tokenizer); - List lowerCased = config.getClassificationLabels() - .stream() - .map(s -> s.toLowerCase(Locale.ROOT)) - .collect(Collectors.toList()); + List lowerCased = config.getClassificationLabels().stream().map(s -> s.toLowerCase(Locale.ROOT)).toList(); this.entailmentPos = lowerCased.indexOf("entailment"); this.contraPos = lowerCased.indexOf("contradiction"); if (entailmentPos == -1 || contraPos == -1) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java index 61c037b712406..b36ce41c5c49d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java @@ -12,8 +12,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.inference.results.FillMaskResults; import org.elasticsearch.xpack.core.ml.inference.results.TopClassEntry; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.FillMaskConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.VocabularyConfig; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizationResult; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizer; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizationResult; @@ -105,8 +103,7 @@ public void testValidate_GivenMissingMaskToken() { BertTokenizer tokenizer = mock(BertTokenizer.class); when(tokenizer.getMaskToken()).thenReturn("[MASK]"); - FillMaskConfig config = new FillMaskConfig(new 
VocabularyConfig("test-index"), null, null, null); - FillMaskProcessor processor = new FillMaskProcessor(tokenizer, config); + FillMaskProcessor processor = new FillMaskProcessor(tokenizer); ValidationException e = expectThrows(ValidationException.class, () -> processor.validateInputs(input)); assertThat(e.getMessage(), containsString("no [MASK] token could be found")); @@ -118,8 +115,7 @@ public void testProcessResults_GivenMultipleMaskTokens() { BertTokenizer tokenizer = mock(BertTokenizer.class); when(tokenizer.getMaskToken()).thenReturn("[MASK]"); - FillMaskConfig config = new FillMaskConfig(new VocabularyConfig("test-index"), null, null, null); - FillMaskProcessor processor = new FillMaskProcessor(tokenizer, config); + FillMaskProcessor processor = new FillMaskProcessor(tokenizer); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> processor.validateInputs(input)); assertThat(e.getMessage(), containsString("only one [MASK] token should exist in the input")); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java index 3fd07c65f25d0..f988da404bdb3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java @@ -77,7 +77,7 @@ public void testProcessor() throws IOException { BertTokenization tokenization = new BertTokenization(false, true, 384, Tokenization.Truncate.NONE, 128); BertTokenizer tokenizer = BertTokenizer.builder(TEST_CASED_VOCAB, tokenization).build(); QuestionAnsweringConfig config = new QuestionAnsweringConfig(question, 1, 10, new VocabularyConfig(""), tokenization, "prediction"); - QuestionAnsweringProcessor processor = new QuestionAnsweringProcessor(tokenizer, config); + QuestionAnsweringProcessor processor = new QuestionAnsweringProcessor(tokenizer); TokenizationResult tokenizationResult = processor.getRequestBuilder(config) .buildRequest(List.of(input), "1", Tokenization.Truncate.NONE, 128) .tokenization(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java new file mode 100644 index 0000000000000..5601fd6b8baa8 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.nlp; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.inference.results.TextSimilarityInferenceResults; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.BertTokenization; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.Tokenization; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.VocabularyConfig; +import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizationResult; +import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizer; +import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizationResult; +import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizerTests.TEST_CASED_VOCAB; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class TextSimilarityProcessorTests extends ESTestCase { + + // The data here is nonsensical. We just want to make sure tokens chosen match up with our scores + public void testProcessor() throws IOException { + String question = "is Elasticsearch fun?"; + String input = "Pancake day is fun with Elasticsearch and little red car"; + BertTokenization tokenization = new BertTokenization(false, true, 384, Tokenization.Truncate.NONE, 128); + BertTokenizer tokenizer = BertTokenizer.builder(TEST_CASED_VOCAB, tokenization).build(); + TextSimilarityConfig textSimilarityConfig = new TextSimilarityConfig( + question, + new VocabularyConfig(""), + tokenization, + "result", + TextSimilarityConfig.SpanScoreFunction.MAX + ); + TextSimilarityProcessor processor = new TextSimilarityProcessor(tokenizer); + TokenizationResult tokenizationResult = processor.getRequestBuilder(textSimilarityConfig) + .buildRequest(List.of(input), "1", Tokenization.Truncate.NONE, 128) + .tokenization(); + assertThat(tokenizationResult.anyTruncated(), is(false)); + assertThat(tokenizationResult.getTokenization(0).tokenIds().length, equalTo(19)); + // tokenized question length with cls and sep token + assertThat(tokenizationResult.getTokenization(0).seqPairOffset(), equalTo(7)); + double[][][] scores = { { { 42 } } }; + NlpTask.ResultProcessor resultProcessor = processor.getResultProcessor(textSimilarityConfig); + PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult("1", scores, 1L, false); + TextSimilarityInferenceResults result = (TextSimilarityInferenceResults) resultProcessor.processResult( + tokenizationResult, + pyTorchResult + ); + + // Note this is a different answer to testTopScores because of the question length + assertThat(result.predictedValue(), closeTo(42, 1e-6)); + } + + public void testResultFunctions() { + BertTokenization tokenization = new BertTokenization(false, true, 384, Tokenization.Truncate.NONE, 128); + BertTokenizer tokenizer = BertTokenizer.builder(TEST_CASED_VOCAB, tokenization).build(); + TextSimilarityConfig textSimilarityConfig = new TextSimilarityConfig( + randomAlphaOfLength(10), + new VocabularyConfig(""), + tokenization, + "result", + TextSimilarityConfig.SpanScoreFunction.MAX + ); + TextSimilarityProcessor processor = new TextSimilarityProcessor(tokenizer); + NlpTask.ResultProcessor resultProcessor = processor.getResultProcessor(textSimilarityConfig); + 
double[][][] scores = { { { 42 }, { 12 }, { 100 } } }; + PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult("1", scores, 1L, false); + TextSimilarityInferenceResults result = (TextSimilarityInferenceResults) resultProcessor.processResult( + new BertTokenizationResult(List.of(), List.of(), 1), + pyTorchResult + ); + assertThat(result.predictedValue(), equalTo(100.0)); + // Test mean + textSimilarityConfig = new TextSimilarityConfig( + randomAlphaOfLength(10), + new VocabularyConfig(""), + tokenization, + "result", + TextSimilarityConfig.SpanScoreFunction.MEAN + ); + processor = new TextSimilarityProcessor(tokenizer); + resultProcessor = processor.getResultProcessor(textSimilarityConfig); + result = (TextSimilarityInferenceResults) resultProcessor.processResult( + new BertTokenizationResult(List.of(), List.of(), 1), + pyTorchResult + ); + assertThat(result.predictedValue(), closeTo(51.333333333333, 1e-12)); + } + +} From 495722d1ebc1482f96da4a9a1d1504aed3c82163 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 1 Aug 2022 08:46:34 -0400 Subject: [PATCH 026/265] Port tests for `date_range` (#88958) This ports some of the tests for `date_range` from a high overhead `IntegTestCase` to a lower overhead `AggregatorTestCase`. `AggregatorTestCase` is also lower level so it can find more fun things like memory leaks. And it enables values source type testing for `date_range`. Inspired by #55502 by andy.bristol@elastic.co --- .../aggregations/bucket/DateRangeIT.java | 266 ------------------ .../range/DateRangeAggregatorTests.java | 191 ++++++++++++- 2 files changed, 188 insertions(+), 269 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index a8bb7123475f4..7322ae7ad9e4f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -531,272 +531,6 @@ public void testMultiValuedField() throws Exception { assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); } - /* - Feb 2, Mar 3, 1 - Mar 2, Apr 3, 2 - Mar 15, Apr 16, 3 - Apr 2, May 3, 4 - Apr 15, May 16 5 - Apr 23, May 24 6 - */ - - public void testMultiValuedFieldWithValueScript() throws Exception { - Map params = new HashMap<>(); - params.put("fieldname", "dates"); - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - dateRange("range").field("dates") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15)) - ) - .get(); - - assertSearchResponse(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), 
equalTo(1L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 1L)); - } - - /* - Feb 2, Mar 3, 1 - Mar 2, Apr 3, 2 - Mar 15, Apr 16, 3 - Apr 2, May 3, 4 - Apr 15, May 16 5 - Apr 23, May 24 6 - */ - - public void testScriptSingleValue() throws Exception { - Map params = new HashMap<>(); - params.put("fieldname", "date"); - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - dateRange("range").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15)) - ) - .get(); - - assertSearchResponse(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); - } - - /* - Jan 2, Feb 3, 1 - Feb 2, Mar 3, 2 - Feb 15, Mar 16, 3 - Mar 2, Apr 3, 4 - Mar 15, Apr 16 5 - Mar 23, Apr 24 6 - */ - - public void testScriptMultiValued() throws Exception { - Map params = new HashMap<>(); - params.put("fieldname", "dates"); - SearchResponse response = 
client().prepareSearch("idx") - .addAggregation( - dateRange("range").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15)) - ) - .get(); - - assertSearchResponse(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); - } - - public void testUnmapped() throws Exception { - client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); - - SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation( - dateRange("range").field("date") - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15)) - ) - .get(); - - assertSearchResponse(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), 
equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(0L)); - } - - public void testUnmappedWithStringDates() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation( - dateRange("range").field("date") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15") - ) - .get(); - - assertSearchResponse(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(0L)); - } - public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") .addAggregation( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java index bd84603d8c1b5..c626d5df7ddc7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.mapper.BooleanFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; @@ 
-31,22 +32,35 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.lookup.LeafDocLookup; import java.io.IOException; import java.time.Instant; import java.time.ZoneOffset; import java.time.ZonedDateTime; -import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.function.Consumer; +import java.util.function.Function; +import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.nullValue; public class DateRangeAggregatorTests extends AggregatorTestCase { @@ -56,6 +70,31 @@ public class DateRangeAggregatorTests extends AggregatorTestCase { private static final Instant T1 = ZonedDateTime.of(2015, 11, 13, 16, 14, 34, 0, ZoneOffset.UTC).toInstant(); private static final Instant T2 = ZonedDateTime.of(2016, 11, 13, 16, 14, 34, 0, ZoneOffset.UTC).toInstant(); + /** + * Dates used by scripting tests. 
+ */ + private static final List> DATE_FIELD_VALUES = List.of( + List.of( + ZonedDateTime.of(2012, 1, 2, 0, 0, 0, 0, ZoneOffset.UTC).toInstant(), + ZonedDateTime.of(2012, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC).toInstant() + ), + List.of( + ZonedDateTime.of(2012, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC).toInstant(), + ZonedDateTime.of(2012, 3, 3, 0, 0, 0, 0, ZoneOffset.UTC).toInstant() + ), + List.of( + ZonedDateTime.of(2012, 2, 15, 0, 0, 0, 0, ZoneOffset.UTC).toInstant(), + ZonedDateTime.of(2012, 3, 16, 0, 0, 0, 0, ZoneOffset.UTC).toInstant() + ), + List.of( + ZonedDateTime.of(2012, 3, 2, 0, 0, 0, 0, ZoneOffset.UTC).toInstant(), + ZonedDateTime.of(2012, 4, 3, 0, 0, 0, 0, ZoneOffset.UTC).toInstant() + ) + ); + + private static final String VALUE_SCRIPT_NAME = "value_script"; + private static final String FIELD_SCRIPT_NAME = "field_script"; + public void testBooleanFieldDeprecated() throws IOException { final String fieldName = "bogusBoolean"; testCase(new DateRangeAggregationBuilder("name").field(fieldName).addRange("false", "true"), new MatchAllDocsQuery(), iw -> { @@ -182,7 +221,7 @@ public void testUnboundedRanges() throws IOException { Resolution.MILLISECONDS, null, null, - Collections.emptyMap() + emptyMap() ) ); } @@ -229,6 +268,31 @@ public void testMissingDateStringWithNumberField() throws IOException { }, range -> fail("Should have thrown exception"), fieldType)); } + public void testUnmappedWithoutMissing() throws IOException { + List> rangeTypes = List.of( + builder -> builder.addRange("2015-01-01", "2015-12-31"), + builder -> builder.addRange( + ZonedDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2015, 12, 31, 0, 0, 0, 0, ZoneOffset.UTC) + ) + ); + + for (Consumer rangeType : rangeTypes) { + final DateRangeAggregationBuilder aggregationBuilder = new DateRangeAggregationBuilder("date_range").field("does_not_exist"); + rangeType.accept(aggregationBuilder); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new NumericDocValuesField(NUMBER_FIELD_NAME, 7))); + iw.addDocument(singleton(new NumericDocValuesField(NUMBER_FIELD_NAME, 1))); + }, (InternalDateRange range) -> { + List ranges = range.getBuckets(); + assertEquals(1, ranges.size()); + assertEquals(0, ranges.get(0).getDocCount()); + assertFalse(AggregationInspectionHelper.hasValue(range)); + }); + } + } + public void testUnmappedWithMissingNumber() throws IOException { DateRangeAggregationBuilder aggregationBuilder = new DateRangeAggregationBuilder("date_range").field("does_not_exist") .addRange("2015-11-13", "2015-11-14") @@ -310,6 +374,102 @@ public void testUnmappedWithBadMissingField() { }, range -> fail("Should have thrown exception"), fieldType)); } + public void testValueScriptSingleValuedField() throws IOException { + DateRangeAggregationBuilder aggregationBuilder = new DateRangeAggregationBuilder("date_range").field(DATE_FIELD_NAME) + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT_NAME, emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + for (List values : DATE_FIELD_VALUES) { + iw.addDocument(List.of(new SortedNumericDocValuesField(DATE_FIELD_NAME, values.get(0).toEpochMilli()))); + } + }, (InternalDateRange range) -> { + List ranges = range.getBuckets(); + assertThat(ranges, hasSize(3)); + + { + Range.Bucket bucket = ranges.get(0); + assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + 
assertThat(bucket.getFrom(), nullValue()); + assertThat(bucket.getTo(), equalTo(ZonedDateTime.of(2012, 2, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + + { + Range.Bucket bucket = ranges.get(1); + assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); + assertThat(bucket.getFrom(), equalTo(ZonedDateTime.of(2012, 2, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getTo(), equalTo(ZonedDateTime.of(2012, 3, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + + { + Range.Bucket bucket = ranges.get(2); + assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); + assertThat(bucket.getFrom(), equalTo(ZonedDateTime.of(2012, 3, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getTo(), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(2L)); + } + }, new DateFieldMapper.DateFieldType(DATE_FIELD_NAME)); + } + + public void testValueScriptMultiValuedField() throws IOException { + DateRangeAggregationBuilder aggregationBuilder = new DateRangeAggregationBuilder("date_range").field(DATE_FIELD_NAME) + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT_NAME, emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + for (List values : DATE_FIELD_VALUES) { + iw.addDocument( + values.stream().map(value -> new SortedNumericDocValuesField(DATE_FIELD_NAME, value.toEpochMilli())).toList() + ); + } + }, (InternalDateRange range) -> { + List ranges = range.getBuckets(); + assertThat(ranges, hasSize(3)); + + { + Range.Bucket bucket = ranges.get(0); + assertThat(bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + assertThat(bucket.getFrom(), nullValue()); + assertThat(bucket.getTo(), equalTo(ZonedDateTime.of(2012, 2, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + + { + Range.Bucket bucket = ranges.get(1); + assertThat(bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); + assertThat(bucket.getFrom(), equalTo(ZonedDateTime.of(2012, 2, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getTo(), equalTo(ZonedDateTime.of(2012, 3, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + } + + { + Range.Bucket bucket = ranges.get(2); + assertThat(bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); + assertThat(bucket.getFrom(), equalTo(ZonedDateTime.of(2012, 3, 15, 0, 0, 0, 0, ZoneOffset.UTC))); + assertThat(bucket.getTo(), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + }, new 
DateFieldMapper.DateFieldType(DATE_FIELD_NAME)); + } + private void testBothResolutions( Query query, CheckedBiConsumer buildIndex, @@ -344,7 +504,7 @@ private void testCase( resolution, null, null, - Collections.emptyMap() + emptyMap() ); DateRangeAggregationBuilder aggregationBuilder = new DateRangeAggregationBuilder("test_range_agg"); aggregationBuilder.field(DATE_FIELD_NAME); @@ -386,4 +546,29 @@ public void doAssertReducedMultiBucketConsumer(Aggregation agg, MultiBucketConsu * No-op. */ } + + @Override + protected List getSupportedValuesSourceTypes() { + return List.of(CoreValuesSourceType.DATE); + } + + @Override + protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { + return new DateRangeAggregationBuilder("_name").field(fieldName).addRange("2015-01-01", "2015-12-31"); + } + + @Override + protected ScriptService getMockScriptService() { + final Map, Object>> scripts = Map.of(VALUE_SCRIPT_NAME, vars -> { + Number value = (Number) vars.get("_value"); + return Instant.ofEpochMilli(value.longValue()).atZone(ZoneOffset.UTC).plusMonths(1).toInstant().toEpochMilli(); + }, FIELD_SCRIPT_NAME, vars -> { + String fieldName = (String) vars.get("field"); + LeafDocLookup lookup = (LeafDocLookup) vars.get("doc"); + return lookup.get(fieldName).stream().map(value -> ((ZonedDateTime) value).plusMonths(1).toInstant().toEpochMilli()).toList(); + }); + final MockScriptEngine engine = new MockScriptEngine(MockScriptEngine.NAME, scripts, emptyMap()); + final Map engines = Map.of(engine.getType(), engine); + return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS, () -> 0); + } } From 4607182ce8c147d48ed79ea807fc9bb705b24001 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 1 Aug 2022 08:47:23 -0400 Subject: [PATCH 027/265] synthetic source: fix scaled_float rounding (#88916) There were some cases where synthetic source wasn't properly rounding in round trips. `0.15527719259262085` with a scaling factor of `2.4206374697469164E16` was round tripping to `0.15527719259262088` which then round trips up to `0.1552771925926209`, rounding the wrong direction! This fixes the round tripping in this case through ever more paranoid double checking and nudging. Closes #88854 --- docs/changelog/88916.yaml | 6 +++ .../mapper/extras/ScaledFloatFieldMapper.java | 36 +++++++--------- .../extras/ScaledFloatFieldMapperTests.java | 42 +++++++++++++------ 3 files changed, 51 insertions(+), 33 deletions(-) create mode 100644 docs/changelog/88916.yaml diff --git a/docs/changelog/88916.yaml b/docs/changelog/88916.yaml new file mode 100644 index 0000000000000..fb8d051988cd9 --- /dev/null +++ b/docs/changelog/88916.yaml @@ -0,0 +1,6 @@ +pr: 88916 +summary: "Synthetic source: fix `scaled_float` rounding" +area: "TSDB" +type: bug +issues: + - 88854 diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index cf03716f82b30..40dc7ebd390af 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -704,29 +704,25 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { * assert scaled2 != Long.MAX_VALUE; * } *
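Illustration (not part of the patch): the javadoc sample above and the commit message describe the unstable round trip; the sketch below reproduces it with the quoted constants and applies the same one-ulp nudge that the reworked decodeForSyntheticSource uses. The class name is invented for the example.

// Illustrative sketch only: reproduces the drifting round trip and the one-ulp correction.
public class ScaledFloatNudgeSketch {
    public static void main(String[] args) {
        double scalingFactor = 2.4206374697469164E16;
        double original = 0.15527719259262085;
        long scaled = Math.round(original * scalingFactor);   // encode
        double decoded = scaled / scalingFactor;               // naive decode, may drift by a digit in the last place
        long rescaled = Math.round(decoded * scalingFactor);
        if (rescaled != scaled) {
            // Nudge one ulp towards the value that re-encodes to the original long, as the patch does.
            decoded = rescaled > scaled ? decoded - Math.ulp(decoded) : decoded + Math.ulp(decoded);
        }
        System.out.println(Math.round(decoded * scalingFactor) == scaled); // true once nudged
    }
}

The patch text continues below with the reworked javadoc and method body.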

- * We work around this by detecting such cases and artificially bumping them - * up by a single digit in the last place, forcing them to always saturate - * the {@link Math#round} call. + * This can happen sometimes with regular old rounding too, in situations that + * aren't entirely clear at the moment. We work around this by detecting when + * the round trip wouldn't produce the same encoded value and artificially + * bumping them up by a single digit in the last place towards the direction + * that would make the round trip consistent. Bumping by a single digit in + * the last place is always enough to correct the tiny errors that can sneak + * in from the unexpected rounding. */ static double decodeForSyntheticSource(long scaledValue, double scalingFactor) { - if (scaledValue == Long.MAX_VALUE) { - double max = Long.MAX_VALUE / scalingFactor; - if (Math.round(max * scalingFactor) != Long.MAX_VALUE) { - double v = max + Math.ulp(max); - assert Math.round(v * scalingFactor) == Long.MAX_VALUE; - return v; - } - return max; - } - if (scaledValue == Long.MIN_VALUE) { - double min = Long.MIN_VALUE / scalingFactor; - if (Math.round(min * scalingFactor) != Long.MIN_VALUE) { - double v = min - Math.ulp(min); - assert Math.round(v * scalingFactor) == Long.MIN_VALUE; - return v; + double v = scaledValue / scalingFactor; + long reenc = Math.round(v * scalingFactor); + if (reenc != scaledValue) { + if (reenc > scaledValue) { + v -= Math.ulp(v); + } else { + v += Math.ulp(v); } - return min; + assert Math.round(v * scalingFactor) == scaledValue : Math.round(v * scalingFactor) + " != " + scaledValue; } - return scaledValue / scalingFactor; + return v; } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java index 52d524ca0ea02..66a066b6b0752 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java @@ -383,21 +383,15 @@ private Tuple generateValue() { private double round(double d) { long encoded = Math.round(d * scalingFactor); - if (encoded == Long.MAX_VALUE) { - double max = Long.MAX_VALUE / scalingFactor; - if (max * scalingFactor != Long.MAX_VALUE) { - return max + Math.ulp(max); + double decoded = encoded / scalingFactor; + long reencoded = Math.round(decoded * scalingFactor); + if (encoded != reencoded) { + if (encoded > reencoded) { + return decoded + Math.ulp(decoded); } - return max; + return decoded - Math.ulp(decoded); } - if (encoded == Long.MIN_VALUE) { - double min = Long.MIN_VALUE / scalingFactor; - if (min * scalingFactor != Long.MIN_VALUE) { - return min - Math.ulp(min); - } - return min; - } - return encoded / scalingFactor; + return decoded; } private void mapping(XContentBuilder b) throws IOException { @@ -487,6 +481,28 @@ public void testEncodeDecodeSaturatedHigh() { assertThat(encodeDecode(saturated, scalingFactor), equalTo(max)); } + public void testEncodeDecodeRandom() { + double scalingFactor = randomDoubleBetween(0, Double.MAX_VALUE, false); + double v = randomValue(); + double once = encodeDecode(v, scalingFactor); + double twice = encodeDecode(once, scalingFactor); + assertThat(twice, equalTo(once)); + } + + /** + * Tests that a value and scaling factor that won't + * properly round trip without a "nudge" to keep + * them 
from rounding in the wrong direction on the + * second iteration. + */ + public void testEncodeDecodeNeedNudge() { + double scalingFactor = 2.4206374697469164E16; + double v = 0.15527719259262085; + double once = encodeDecode(v, scalingFactor); + double twice = encodeDecode(once, scalingFactor); + assertThat(twice, equalTo(once)); + } + /** * Tests that any encoded value with that can that fits in the mantissa of * a double precision floating point can be round tripped through synthetic From 87ab933c8b89c10c746243a44ec3443eed5a6aab Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 1 Aug 2022 08:48:03 -0400 Subject: [PATCH 028/265] Remove calls to deprecated xcontent method (#84733) This removes many calls to the last remaining `createParser` method that I deprecated in #79814, migrating callers to one of the new methods that it created. --- .../subphase/FetchSourcePhaseBenchmark.java | 5 +-- .../client/RestHighLevelClient.java | 23 ++++++------- .../provider/json/JsonXContentGenerator.java | 20 ++--------- .../ingest/common/JsonProcessor.java | 9 ++--- .../ingest/common/ScriptProcessor.java | 4 +-- .../ingest/useragent/DeviceTypeParser.java | 8 ++--- .../ingest/useragent/UserAgentParser.java | 5 ++- .../useragent/DeviceTypeParserTests.java | 7 ++-- .../painless/ContextGeneratorCommon.java | 5 +-- .../action/PainlessExecuteRequestTests.java | 5 +-- .../org/elasticsearch/reindex/Reindexer.java | 6 ++-- .../remote/RemoteScrollableHitSource.java | 7 ++-- .../upgrades/SearchStatesIT.java | 7 ++-- .../MultiVersionRepositoryAccessIT.java | 7 +--- .../http/snapshots/RestGetSnapshotsIT.java | 9 ++--- .../java/org/elasticsearch/script/Script.java | 5 ++- .../script/StoredScriptSource.java | 4 +-- .../support/AbstractFilteringTestCase.java | 8 ++--- .../blobstore/BlobStoreTestUtil.java | 4 +-- .../search/RandomSearchRequestGenerator.java | 9 ++--- .../elasticsearch/test/rest/ObjectPath.java | 11 ++----- .../restspec/ClientYamlSuiteRestSpec.java | 11 ++----- .../yaml/section/ClientYamlTestSuite.java | 5 ++- .../watcher/common/http/HttpProxyTests.java | 33 ++++--------------- .../execution/ExecutionServiceTests.java | 9 ++--- .../watcher/input/http/HttpInputTests.java | 5 ++- .../test/integration/SearchInputTests.java | 8 ++--- .../xpack/restart/FullClusterRestartIT.java | 9 ++--- 28 files changed, 75 insertions(+), 173 deletions(-) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java index ff93e41e9915d..a2eba3ac68332 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java @@ -8,8 +8,6 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.FetchSourcePhase; import org.elasticsearch.search.lookup.SourceLookup; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -108,8 +106,7 @@ public BytesReference filterXContentOnBuilder() throws IOException { XContentType.JSON.toParsedMediaType() ); try ( - XContentParser parser = XContentType.JSON.xContent() - 
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, sourceBytes.streamInput()) + XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, sourceBytes.streamInput()) ) { builder.copyCurrentStructure(parser); return BytesReference.bytes(builder); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 0f452e5b9ce1c..06881a4c960dc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -177,6 +177,7 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.Closeable; @@ -244,7 +245,7 @@ public class RestHighLevelClient implements Closeable { // To be called using performClientRequest and performClientRequestAsync to ensure version compatibility check private final RestClient client; - private final NamedXContentRegistry registry; + private final XContentParserConfiguration parserConfig; private final CheckedConsumer doClose; private final boolean useAPICompatibility; @@ -297,11 +298,19 @@ protected RestHighLevelClient( ) { this.client = Objects.requireNonNull(restClient, "restClient must not be null"); this.doClose = Objects.requireNonNull(doClose, "doClose consumer must not be null"); - this.registry = new NamedXContentRegistry( + NamedXContentRegistry registry = new NamedXContentRegistry( Stream.of(getDefaultNamedXContents().stream(), getProvidedNamedXContents().stream(), namedXContentEntries.stream()) .flatMap(Function.identity()) .collect(toList()) ); + /* + * Ignores deprecation warnings. This is appropriate because it is only + * used to parse responses from Elasticsearch. Any deprecation warnings + * emitted there just mean that you are talking to an old version of + * Elasticsearch. There isn't anything you can do about the deprecation. + */ + this.parserConfig = XContentParserConfiguration.EMPTY.withRegistry(registry) + .withDeprecationHandler(DeprecationHandler.IGNORE_DEPRECATIONS); if (useAPICompatibility == null && "true".equals(System.getenv(API_VERSIONING_ENV_VARIABLE))) { this.useAPICompatibility = true; } else { @@ -1165,7 +1174,7 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (xContentType == null) { throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); } - try (XContentParser parser = xContentType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) { + try (XContentParser parser = xContentType.xContent().createParser(parserConfig, entity.getContent())) { return entityParser.apply(parser); } } @@ -1506,14 +1515,6 @@ private Optional getVersionValidation(Response response) throws IOExcept return Optional.empty(); } - /** - * Ignores deprecation warnings. This is appropriate because it is only - * used to parse responses from Elasticsearch. Any deprecation warnings - * emitted there just mean that you are talking to an old version of - * Elasticsearch. There isn't anything you can do about the deprecation. 
- */ - private static final DeprecationHandler DEPRECATION_HANDLER = DeprecationHandler.IGNORE_DEPRECATIONS; - static List getDefaultNamedXContents() { Map> map = new HashMap<>(); map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c)); diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java index c1db5079e5b4c..89c9a5ac62af9 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java @@ -21,13 +21,12 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Streams; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentGenerationException; import org.elasticsearch.xcontent.XContentGenerator; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.provider.filtering.FilterPathBasedFilter; @@ -444,14 +443,7 @@ public void writeRawField(String name, InputStream content) throws IOException { @Override public void writeRawField(String name, InputStream content, XContentType contentType) throws IOException { if (mayWriteRawData(contentType) == false) { - // EMPTY is safe here because we never call namedObject when writing raw data - try ( - XContentParser parser = XContentFactory.xContent(contentType) - // It's okay to pass the throwing deprecation handler - // because we should not be writing raw fields when - // generating JSON - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, content) - ) { + try (XContentParser parser = XContentFactory.xContent(contentType).createParser(XContentParserConfiguration.EMPTY, content)) { parser.nextToken(); writeFieldName(name); copyCurrentStructure(parser); @@ -493,13 +485,7 @@ protected boolean supportsRawWrites() { } protected void copyRawValue(InputStream stream, XContent xContent) throws IOException { - // EMPTY is safe here because we never call namedObject - try ( - XContentParser parser = xContent - // It's okay to pass the throwing deprecation handler because we - // should not be writing raw fields when generating JSON - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream) - ) { + try (XContentParser parser = xContent.createParser(XContentParserConfiguration.EMPTY, stream)) { copyCurrentStructure(parser); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index 83cd59bc1b4be..a79954de0f35c 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ -14,9 +14,8 @@ import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; -import org.elasticsearch.xcontent.DeprecationHandler; -import 
org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -77,11 +76,7 @@ public static Object apply(Object fieldValue, boolean allowDuplicateKeys) { BytesReference bytesRef = fieldValue == null ? new BytesArray("null") : new BytesArray(fieldValue.toString()); try ( InputStream stream = bytesRef.streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - stream - ) + XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, stream) ) { parser.allowDuplicateKeys(allowDuplicateKeys); XContentParser.Token token = parser.nextToken(); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index 2586b9aed919d..fb538a0b6b264 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -19,9 +19,9 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -110,7 +110,7 @@ public ScriptProcessor create( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(config); InputStream stream = BytesReference.bytes(builder).streamInput(); XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) ) { Script script = Script.parse(parser); diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java index 2ab7bbbb1b32b..e6bba12a37484 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java @@ -9,10 +9,10 @@ package org.elasticsearch.ingest.useragent; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.ingest.useragent.UserAgentParser.VersionedName; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -24,7 +24,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import static org.elasticsearch.ingest.useragent.UserAgentParser.VersionedName; import static org.elasticsearch.ingest.useragent.UserAgentParser.readParserConfigurations; 
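Illustration (not part of the diff): every hunk in this patch applies the same mechanical change, swapping the deprecated three-argument createParser overload for the one that takes an XContentParserConfiguration. A minimal self-contained sketch follows, assuming a JSON InputStream; the helper class name is invented for the example.

import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.json.JsonXContent;

import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

class CreateParserMigrationSketch {
    // Before: the deprecated three-argument overload removed throughout this patch:
    //   JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)
    // After: the registry and deprecation handler travel together in one XContentParserConfiguration.
    static Map<String, Object> parse(InputStream stream) throws IOException {
        XContentParserConfiguration config = XContentParserConfiguration.EMPTY
            .withRegistry(NamedXContentRegistry.EMPTY)
            .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE);
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(config, stream)) {
            return parser.map();
        }
    }
}

The DeviceTypeParser hunks continue below.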
public class DeviceTypeParser { @@ -40,9 +39,8 @@ public class DeviceTypeParser { private final HashMap> deviceTypePatterns = new HashMap<>(); public void init(InputStream regexStream) throws IOException { - // EMPTY is safe here because we don't use namedObject XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, regexStream); + .createParser(XContentParserConfiguration.EMPTY, regexStream); XContentParser.Token token = yamlParser.nextToken(); diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java index 6eba8501f8820..37e54f56984b7 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java @@ -9,10 +9,9 @@ package org.elasticsearch.ingest.useragent; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -50,7 +49,7 @@ final class UserAgentParser { private void init(InputStream regexStream) throws IOException { // EMPTY is safe here because we don't use namedObject XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, regexStream); + .createParser(XContentParserConfiguration.EMPTY, regexStream); XContentParser.Token token = yamlParser.nextToken(); diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java index f3bca428ac0fd..6543ef2095b87 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java @@ -8,11 +8,11 @@ package org.elasticsearch.ingest.useragent; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.ingest.useragent.UserAgentParser.VersionedName; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.junit.BeforeClass; @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.ingest.useragent.UserAgentParser.VersionedName; import static org.elasticsearch.ingest.useragent.UserAgentParser.readParserConfigurations; import static org.hamcrest.Matchers.is; @@ -33,7 +32,7 @@ public class DeviceTypeParserTests extends ESTestCase { private ArrayList> readTestDevices(InputStream regexStream, String keyName) throws IOException { XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(NamedXContentRegistry.EMPTY, 
LoggingDeprecationHandler.INSTANCE, regexStream); + .createParser(XContentParserConfiguration.EMPTY, regexStream); XContentParser.Token token = yamlParser.nextToken(); diff --git a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java index 17c835f5a96a6..a83f3b0ac6cec 100644 --- a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java +++ b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java @@ -15,6 +15,7 @@ import org.elasticsearch.painless.action.PainlessContextInstanceBindingInfo; import org.elasticsearch.painless.action.PainlessContextMethodInfo; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -36,7 +37,7 @@ public class ContextGeneratorCommon { public static List getContextInfos() throws IOException { URLConnection getContextNames = new URL("http://" + System.getProperty("cluster.uri") + "/_scripts/painless/_context") .openConnection(); - XContentParser parser = JsonXContent.jsonXContent.createParser(null, null, getContextNames.getInputStream()); + XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextNames.getInputStream()); parser.nextToken(); parser.nextToken(); @SuppressWarnings("unchecked") @@ -50,7 +51,7 @@ public static List getContextInfos() throws IOException { URLConnection getContextInfo = new URL( "http://" + System.getProperty("cluster.uri") + "/_scripts/painless/_context?context=" + contextName ).openConnection(); - parser = JsonXContent.jsonXContent.createParser(null, null, getContextInfo.getInputStream()); + parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextInfo.getInputStream()); contextInfos.add(PainlessContextInfo.fromXContent(parser)); ((HttpURLConnection) getContextInfo).disconnect(); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java index 6d82f55fbb4dd..7d957cca4d0b0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java @@ -9,10 +9,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.painless.action.PainlessExecuteAction.Request.ContextSetup; @@ -51,8 +49,7 @@ public final void testFromXContent() throws Exception { try (XContentBuilder builder = XContentBuilder.builder(xContent)) { builder.value(testInstance); - StreamInput instanceInput = BytesReference.bytes(builder).streamInput(); - try (XContentParser parser = xContent.createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, instanceInput)) { + try (XContentParser parser = 
createParser(xContent, BytesReference.bytes(builder).streamInput())) { PainlessExecuteAction.Request result = PainlessExecuteAction.Request.parse(parser); assertThat(result, equalTo(testInstance)); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java index b573ec1597f17..4c3206e82b8d6 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java @@ -54,10 +54,9 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -323,8 +322,7 @@ protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) // we need to convert try ( InputStream stream = doc.getSource().streamInput(); - XContentParser parser = sourceXContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream); + XContentParser parser = sourceXContentType.xContent().createParser(XContentParserConfiguration.EMPTY, stream); XContentBuilder builder = XContentBuilder.builder(mainRequestXContentType.xContent()) ) { parser.nextToken(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java index 720ef6d443299..b6728d09d89f8 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java @@ -33,9 +33,9 @@ import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -192,7 +192,10 @@ public void onSuccess(org.elasticsearch.client.Response response) { // EMPTY is safe here because we don't call namedObject try ( XContentParser xContentParser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, content) + .createParser( + XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + content + ) ) { parsedResponse = parser.apply(xContentParser, xContentType); } catch (XContentParseException e) { diff --git a/qa/ccs-rolling-upgrade-remote-cluster/src/javaRestTest/java/org/elasticsearch/upgrades/SearchStatesIT.java b/qa/ccs-rolling-upgrade-remote-cluster/src/javaRestTest/java/org/elasticsearch/upgrades/SearchStatesIT.java index 73d6cdf4711a7..64b0d6d61064e 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/src/javaRestTest/java/org/elasticsearch/upgrades/SearchStatesIT.java +++ b/qa/ccs-rolling-upgrade-remote-cluster/src/javaRestTest/java/org/elasticsearch/upgrades/SearchStatesIT.java @@ 
-43,9 +43,9 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.test.rest.yaml.ObjectPath; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -189,8 +189,7 @@ void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int r Response response = localClient.performRequest(request); try ( XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + XContentParserConfiguration.EMPTY, response.getEntity().getContent() ) ) { diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index 3530f0d1064ba..edd87d8fd381e 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; -import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; @@ -243,11 +242,7 @@ private void deleteSnapshot(String repoName, String name) throws IOException { private List> listSnapshots(String repoName) throws IOException { try ( InputStream entity = client().performRequest(new Request("GET", "/_snapshot/" + repoName + "/_all")).getEntity().getContent(); - XContentParser parser = JsonXContent.jsonXContent.createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - entity - ) + XContentParser parser = createParser(JsonXContent.jsonXContent, entity) ) { return (List>) parser.map().get("snapshots"); } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index dcdaf2419dea6..d107cd702a6fa 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -23,9 +23,8 @@ import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -444,11 +443,7 @@ private static Request baseGetSnapshotsRequest(String repoName) { private static GetSnapshotsResponse readSnapshotInfos(Response response) throws IOException { try ( InputStream input = response.getEntity().getContent(); - XContentParser parser = 
JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - input - ) + XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, input) ) { return GetSnapshotsResponse.fromXContent(parser); } diff --git a/server/src/main/java/org/elasticsearch/script/Script.java b/server/src/main/java/org/elasticsearch/script/Script.java index 86625e8a259dd..d21cdc50e00b5 100644 --- a/server/src/main/java/org/elasticsearch/script/Script.java +++ b/server/src/main/java/org/elasticsearch/script/Script.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.xcontent.AbstractObjectParser; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; import org.elasticsearch.xcontent.ParseField; @@ -28,6 +27,7 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser.Token; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -303,8 +303,7 @@ public static Script parse(Settings settings) { try ( InputStream stream = BytesReference.bytes(builder).streamInput(); XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, + XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream ) ) { diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java index 278d92a598275..9e1ab0efb543b 100644 --- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; import org.elasticsearch.xcontent.ParseField; @@ -27,6 +26,7 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser.Token; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -188,7 +188,7 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon try ( InputStream stream = content.streamInput(); XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) ) { Token token = parser.nextToken(); diff --git a/test/framework/src/main/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java index 057dfd7ed6d66..90002d336a6dd 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java @@ -12,12 +12,11 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.FilterXContentParserWrapper; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.BufferedReader; @@ -52,10 +51,7 @@ protected static Builder builderFor(String file) { return builder -> { try (InputStream stream = AbstractFilteringTestCase.class.getResourceAsStream(file)) { assertThat("Couldn't find [" + file + "]", stream, notNullValue()); - try ( - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream) - ) { + try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, stream)) { // copyCurrentStructure does not property handle filters when it is passed a json parser. So we hide it. return builder.copyCurrentStructure(new FilterXContentParserWrapper(parser) { }); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java index be472e2ae24d8..8304901864ba4 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java @@ -42,8 +42,8 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.DataInputStream; @@ -114,7 +114,7 @@ public static PlainActionFuture assertConsistencyAsync(BlobStore try ( InputStream blob = blobContainer.readBlob(BlobStoreRepository.INDEX_FILE_PREFIX + latestGen); XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, blob) + .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), blob) ) { repositoryData = RepositoryData.snapshotsFromXContent(parser, latestGen, false); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index a0bb992745354..2189603d88029 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -34,11 +34,10 @@ import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.test.AbstractQueryTestCase; -import 
org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -310,11 +309,7 @@ public static SearchSourceBuilder randomSearchSourceBuilder( jsonBuilder.endArray(); jsonBuilder.endObject(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(jsonBuilder).streamInput() - ); + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(jsonBuilder).streamInput()); parser.nextToken(); parser.nextToken(); parser.nextToken(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java index 7256b40c00d0e..5cbb8138c7b50 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java @@ -11,11 +11,10 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -38,13 +37,7 @@ public static ObjectPath createFromResponse(Response response) throws IOExceptio } public static ObjectPath createFromXContent(XContent xContent, BytesReference input) throws IOException { - try ( - XContentParser parser = xContent.createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - input.streamInput() - ) - ) { + try (XContentParser parser = xContent.createParser(XContentParserConfiguration.EMPTY, input.streamInput())) { if (parser.nextToken() == XContentParser.Token.START_ARRAY) { return new ObjectPath(parser.listOrderedMap()); } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java index 4ef7d7d2c525a..be34ee9be0ea1 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java @@ -7,10 +7,9 @@ */ package org.elasticsearch.test.rest.yaml.restspec; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.test.ClasspathUtils; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -94,13 +93,7 @@ public static ClientYamlSuiteRestSpec load(String classpathPrefix) throws Except private static void parseSpecFile(ClientYamlSuiteRestApiParser restApiParser, 
Path jsonFile, ClientYamlSuiteRestSpec restSpec) { try (InputStream stream = Files.newInputStream(jsonFile)) { - try ( - XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - stream - ) - ) { + try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, stream)) { String filename = jsonFile.getFileName().toString(); if (filename.equals("_common.json")) { parseCommonSpec(parser, restSpec); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index d5be9214965e6..eed04fc9ac72f 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -10,10 +10,10 @@ import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.Channels; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.yaml.YamlXContent; import java.io.IOException; @@ -63,8 +63,7 @@ public static ClientYamlTestSuite parse(NamedXContentRegistry executeableSection try ( XContentParser parser = YamlXContent.yamlXContent.createParser( - executeableSectionRegistry, - LoggingDeprecationHandler.INSTANCE, + XContentParserConfiguration.EMPTY.withRegistry(executeableSectionRegistry), Files.newInputStream(file) ) ) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpProxyTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpProxyTests.java index a94e69e32729a..7b43f65e05f97 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpProxyTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpProxyTests.java @@ -10,12 +10,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -37,11 +36,7 @@ public void testParser() throws Exception { builder.endObject(); try ( XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(builder).streamInput()) ) { parser.nextToken(); HttpProxy proxy = HttpProxy.parse(parser); @@ -63,11 +58,7 @@ public void testParserValidScheme() throws Exception { 
.endObject(); try ( XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(builder).streamInput()) ) { parser.nextToken(); expectThrows(IllegalArgumentException.class, () -> HttpProxy.parse(parser)); @@ -78,11 +69,7 @@ public void testParserValidPortRange() throws Exception { XContentBuilder builder = jsonBuilder().startObject().field("host", "localhost").field("port", -1).endObject(); try ( XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(builder).streamInput()) ) { parser.nextToken(); expectThrows(ElasticsearchParseException.class, () -> HttpProxy.parse(parser)); @@ -93,11 +80,7 @@ public void testParserNoHost() throws Exception { XContentBuilder builder = jsonBuilder().startObject().field("port", -1).endObject(); try ( XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(builder).streamInput()) ) { parser.nextToken(); expectThrows(ElasticsearchParseException.class, () -> HttpProxy.parse(parser)); @@ -108,11 +91,7 @@ public void testParserNoPort() throws Exception { XContentBuilder builder = jsonBuilder().startObject().field("host", "localhost").endObject(); try ( XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(builder).streamInput()) ) { parser.nextToken(); expectThrows(ElasticsearchParseException.class, () -> HttpProxy.parse(parser)); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index a80b0398c270f..19c29466d655d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -34,11 +34,10 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; @@ -1136,11 +1135,7 @@ public void testUpdateWatchStatusDoesNotUpdateState() throws Exception { UpdateRequest request = (UpdateRequest) invocation.getArguments()[0]; try ( XContentParser 
parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - request.doc().source().streamInput() - ) + .createParser(XContentParserConfiguration.EMPTY, request.doc().source().streamInput()) ) { Map map = parser.map(); Map state = ObjectPath.eval("status.state", map); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java index 0a7c4eb0e5154..de90764e4895a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java @@ -13,13 +13,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; @@ -356,7 +355,7 @@ public void testExceptionCase() throws Exception { BytesReference bytes = BytesReference.bytes(builder); try ( XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput()) + .createParser(XContentParserConfiguration.EMPTY, bytes.streamInput()) ) { Map data = parser.map(); String reason = ObjectPath.eval("error.reason", data); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java index e4367c7dc1c20..28e538fc3921b 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java @@ -26,12 +26,12 @@ import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.input.Input; @@ -209,11 +209,7 @@ public void testThatEmptyRequestBodyWorks() throws Exception { .endObject() .endObject(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - NamedXContentRegistry.EMPTY, - 
DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(builder).streamInput()) ) { parser.nextToken(); // advance past the first starting object diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 182f73e6afd2f..bea6bfc1d6e27 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -26,11 +26,10 @@ import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -629,11 +628,7 @@ public void testSlmPolicyAndStats() throws IOException { XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); try ( XContentParser parser = xContentType.xContent() - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - response.getEntity().getContent() - ) + .createParser(XContentParserConfiguration.EMPTY, response.getEntity().getContent()) ) { assertEquals(new SnapshotLifecycleStats(), SnapshotLifecycleStats.parse(parser)); } From e64eb8cd4f7b23a1e0e2edaf88b7bb62a06e9253 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Mon, 1 Aug 2022 15:16:17 +0200 Subject: [PATCH 029/265] [ML] Frequent Items: use a bitset for deduplication (#88943) Speedup frequent_items by using bitsets instead of lists of longs. With this item sets can be faster de-duplicated. A bit is set according to the order of top items (by count). 
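For illustration, the core trick can be sketched in a few lines. This is a simplified stand-in, not the ItemSetBitSet class added below: the class name ItemSetSketch, the fixed 256-item capacity, and the assumption that both sets share the same word-array length are invented for the example, while the real implementation also grows its word array, tracks cardinality and wordsInUse incrementally, and orders sets by cardinality. Items are mapped to bit positions by their rank in the top-items ordering, so deduplication of a candidate item set against an already collected set with the same document count becomes a word-wise subset test:

    // Illustrative only: stripped-down stand-in for the bitset-based deduplication.
    import java.util.ArrayList;
    import java.util.List;

    final class ItemSetSketch {
        private final long[] words; // 64 item ranks per word

        ItemSetSketch(int maxItems, int... ranks) {
            words = new long[(maxItems + 63) / 64];
            for (int rank : ranks) {
                words[rank >> 6] |= 1L << (rank & 63); // set the bit for this item rank
            }
        }

        // true if every bit set here is also set in `other` (assumes equal capacity)
        boolean isSubsetOf(ItemSetSketch other) {
            for (int i = 0; i < words.length; i++) {
                if ((words[i] & other.words[i]) != words[i]) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            List<ItemSetSketch> reported = new ArrayList<>();
            reported.add(new ItemSetSketch(256, 0, 2, 5, 7)); // an item set already collected

            // A candidate with the same doc count is redundant if a collected superset exists.
            ItemSetSketch candidate = new ItemSetSketch(256, 2, 5);
            boolean duplicate = reported.stream().anyMatch(candidate::isSubsetOf);
            System.out.println(duplicate ? "skip (closed-set duplicate)" : "keep");
        }
    }

Compared with the previous LongsRef-based collector, which walked two sorted arrays of item ids to detect supersets, the subset test above costs only a handful of AND/compare operations per 64 items, and the same bit vector doubles as a cheap equality and hash key for the per-doc-count buckets.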
--- docs/changelog/88943.yaml | 5 + .../CountingItemSetTraverser.java | 42 +- .../frequentitemsets/EclatMapReducer.java | 32 +- .../FrequentItemSetCollector.java | 76 ++- .../aggs/frequentitemsets/ItemSetBitSet.java | 239 +++++++++ .../frequentitemsets/ItemSetTraverser.java | 61 ++- .../frequentitemsets/TransactionStore.java | 13 +- .../FrequentItemSetCollectorTests.java | 187 +++---- .../frequentitemsets/ItemSetBitSetTests.java | 242 +++++++++ .../ItemSetTraverserTests.java | 466 +++++++++++------- 10 files changed, 996 insertions(+), 367 deletions(-) create mode 100644 docs/changelog/88943.yaml create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java diff --git a/docs/changelog/88943.yaml b/docs/changelog/88943.yaml new file mode 100644 index 0000000000000..63dd57750ffb5 --- /dev/null +++ b/docs/changelog/88943.yaml @@ -0,0 +1,5 @@ +pr: 88943 +summary: "Frequent Items: use a bitset for deduplication" +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java index a0d4b407f5feb..4d9de3a86e23c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java @@ -7,11 +7,13 @@ package org.elasticsearch.xpack.ml.aggs.frequentitemsets; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.util.LongsRef; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.ml.aggs.frequentitemsets.TransactionStore.TopItemIds; import java.io.IOException; import java.util.Arrays; @@ -30,6 +32,7 @@ * if [a, b] is not in T, [a, b, c] can not be in T either */ class CountingItemSetTraverser implements Releasable { + private static final Logger logger = LogManager.getLogger(CountingItemSetTraverser.class); // start size and size increment for the occurences stack private static final int OCCURENCES_SIZE_INCREMENT = 10; @@ -48,13 +51,19 @@ class CountingItemSetTraverser implements Releasable { // growable bit set from java util private java.util.BitSet visited; - CountingItemSetTraverser(TransactionStore transactionStore, int cacheTraversalDepth, int cacheNumberOfTransactions, long minCount) { + CountingItemSetTraverser( + TransactionStore transactionStore, + TopItemIds topItemIds, + int cacheTraversalDepth, + int cacheNumberOfTransactions, + long minCount + ) { this.transactionStore = transactionStore; boolean success = false; try { // we allocate 2 big arrays, if the 2nd allocation fails, ensure we clean up - this.topItemSetTraverser = transactionStore.getTopItemIdTraverser(); + this.topItemSetTraverser = new ItemSetTraverser(topItemIds); this.topTransactionIds = transactionStore.getTopTransactionIds(); success = true; } finally { @@ -80,11 +89,15 @@ public boolean next(long earlyStopMinCount) throws IOException { final long totalTransactionCount = transactionStore.getTotalTransactionCount(); int depth = 
topItemSetTraverser.getNumberOfItems(); + long occurencesOfSingleItem = transactionStore.getItemCount(topItemSetTraverser.getItemId()); + if (depth == 1) { // at the 1st level, we can take the count directly from the transaction store - occurencesStack[0] = transactionStore.getItemCount(topItemSetTraverser.getItemId()); + occurencesStack[0] = occurencesOfSingleItem; + return true; + } else if (occurencesOfSingleItem < earlyStopMinCount) { + rememberCountInStack(depth, occurencesOfSingleItem); return true; - // till a certain depth store results in a cache matrix } else if (depth < cacheTraversalDepth) { // get the cached skip count @@ -187,7 +200,7 @@ public long getCount() { /** * Get the count of the item set without the last item */ - public long getPreviousCount() { + public long getParentCount() { if (topItemSetTraverser.getNumberOfItems() > 1) { return occurencesStack[topItemSetTraverser.getNumberOfItems() - 2]; } @@ -201,7 +214,7 @@ public boolean hasBeenVisited() { return true; } - public boolean hasPredecessorBeenVisited() { + public boolean hasParentBeenVisited() { if (topItemSetTraverser.getNumberOfItems() > 1) { return visited.get(topItemSetTraverser.getNumberOfItems() - 2); } @@ -214,7 +227,7 @@ public void setVisited() { } } - public void setPredecessorVisited() { + public void setParentVisited() { if (topItemSetTraverser.getNumberOfItems() > 1) { visited.set(topItemSetTraverser.getNumberOfItems() - 2); } @@ -228,10 +241,15 @@ public int getNumberOfItems() { } /** - * Get the current item set + * + * Get a bitset representation of the current item set */ - public LongsRef getItemSet() { - return topItemSetTraverser.getItemSet(); + public ItemSetBitSet getItemSetBitSet() { + return topItemSetTraverser.getItemSetBitSet(); + } + + public ItemSetBitSet getParentItemSetBitSet() { + return topItemSetTraverser.getParentItemSetBitSet(); } /** @@ -250,7 +268,7 @@ public boolean atLeaf() { @Override public void close() { - Releasables.close(topItemSetTraverser, topTransactionIds); + Releasables.close(topTransactionIds); } // remember the count in the stack without tracking push and pop diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java index 92d31fc7fe118..ef7c168d1fa0d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.LongsRef; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -25,6 +24,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.FrequentItemSetCollector.FrequentItemSet; +import org.elasticsearch.xpack.ml.aggs.frequentitemsets.TransactionStore.TopItemIds; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.mr.AbstractItemSetMapReducer; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.mr.ItemSetMapReduceValueSource.Field; @@ -338,8 +338,6 @@ private static EclatResult eclat( final long totalTransactionCount = transactionStore.getTotalTransactionCount(); Map profilingInfo = null; long 
minCount = (long) Math.ceil(totalTransactionCount * minimumSupport); - FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, size, minCount); - long numberOfSetsChecked = 0; if (profilingInfoReduce != null) { profilingInfo = new LinkedHashMap<>(profilingInfoReduce); @@ -347,8 +345,10 @@ private static EclatResult eclat( } try ( + TopItemIds topItemIds = transactionStore.getTopItemIds(); CountingItemSetTraverser setTraverser = new CountingItemSetTraverser( transactionStore, + topItemIds, BITSET_CACHE_TRAVERSAL_DEPTH, (int) Math.min(MAX_BITSET_CACHE_NUMBER_OF_TRANSACTIONS, totalTransactionCount), minCount @@ -360,7 +360,8 @@ private static EclatResult eclat( minCount, transactionStore.getTotalItemCount() ); - + FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, topItemIds, size, minCount); + long numberOfSetsChecked = 0; long previousMinCount = 0; while (setTraverser.next(minCount)) { @@ -402,8 +403,11 @@ private static EclatResult eclat( if (setTraverser.atLeaf() && setTraverser.hasBeenVisited() == false && setTraverser.getCount() >= minCount - && setTraverser.getItemSet().length >= minimumSetSize) { - minCount = collector.add(setTraverser.getItemSet(), setTraverser.getCount()); + && setTraverser.getItemSetBitSet().cardinality() >= minimumSetSize) { + + logger.trace("add after prune"); + + minCount = collector.add(setTraverser.getItemSetBitSet(), setTraverser.getCount()); // no need to set visited, as we are on a leaf } @@ -418,19 +422,17 @@ private static EclatResult eclat( * * iff the count of the subset is higher, collect */ - if (setTraverser.hasPredecessorBeenVisited() == false - && setTraverser.getItemSet().length > minimumSetSize - && setTraverser.getCount() < setTraverser.getPreviousCount()) { + if (setTraverser.hasParentBeenVisited() == false + && setTraverser.getItemSetBitSet().cardinality() > minimumSetSize + && setTraverser.getCount() < setTraverser.getParentCount()) { // add the set without the last item - LongsRef subItemSet = setTraverser.getItemSet().clone(); - subItemSet.length--; - minCount = collector.add(subItemSet, setTraverser.getPreviousCount()); + minCount = collector.add(setTraverser.getParentItemSetBitSet(), setTraverser.getParentCount()); } // closed set criteria: the predecessor is no longer of interest: either we reported in the previous step or we found a // super set - setTraverser.setPredecessorVisited(); + setTraverser.setParentVisited(); /** * Iff the traverser reached a leaf, the item set can not be further expanded, e.g. we reached [f]: @@ -445,8 +447,8 @@ private static EclatResult eclat( * * Note: this also covers the last item, e.g. 
[a, x, y] */ - if (setTraverser.atLeaf() && setTraverser.getItemSet().length >= minimumSetSize) { - minCount = collector.add(setTraverser.getItemSet(), setTraverser.getCount()); + if (setTraverser.atLeaf() && setTraverser.getItemSetBitSet().cardinality() >= minimumSetSize) { + minCount = collector.add(setTraverser.getItemSetBitSet(), setTraverser.getCount()); // no need to set visited, as we are on a leaf } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java index e38f1dde9b2e5..1ceb5935ae2cf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.LongsRef; import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -20,11 +19,11 @@ import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.ml.aggs.frequentitemsets.TransactionStore.TopItemIds; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.mr.ItemSetMapReduceValueSource.Field; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -159,7 +158,8 @@ public String toString() { */ class FrequentItemSetCandidate { - private LongsRef items; + private static final int STARTBITS = 64; + private ItemSetBitSet items; private long docCount; // every set has a unique id, required for the outer logic @@ -167,15 +167,16 @@ class FrequentItemSetCandidate { private FrequentItemSetCandidate() { this.id = -1; - this.items = new LongsRef(10); + this.items = new ItemSetBitSet(STARTBITS); this.docCount = -1; } FrequentItemSet toFrequentItemSet(List fields) throws IOException { Map> frequentItemsKeyValues = new HashMap<>(); - for (int i = 0; i < items.length; ++i) { - Tuple item = transactionStore.getItem(items.longs[i]); + int pos = items.nextSetBit(0); + while (pos > 0) { + Tuple item = transactionStore.getItem(topItemIds.getItemIdAt(pos - 1)); final Field field = fields.get(item.v1()); Object formattedValue = field.formatValue(item.v2()); String fieldName = fields.get(item.v1()).getName(); @@ -187,6 +188,8 @@ FrequentItemSet toFrequentItemSet(List fields) throws IOException { l.add(formattedValue); frequentItemsKeyValues.put(fieldName, l); } + + pos = items.nextSetBit(++pos); } return new FrequentItemSet(frequentItemsKeyValues, docCount, (double) docCount / transactionStore.getTotalTransactionCount()); @@ -196,7 +199,7 @@ long getDocCount() { return docCount; } - LongsRef getItems() { + ItemSetBitSet getItems() { return items; } @@ -205,17 +208,11 @@ int getId() { } int size() { - return items.length; + return items.cardinality(); } - private void reset(int id, LongsRef items, long docCount) { - if (items.length > this.items.length) { - this.items = new LongsRef(items.length); - } - - System.arraycopy(items.longs, 0, this.items.longs, 0, items.length); - - this.items.length = items.length; + private void reset(int id, ItemSetBitSet 
items, long docCount) { + this.items.reset(items); this.docCount = docCount; this.id = id; } @@ -229,10 +226,7 @@ static class FrequentItemSetPriorityQueue extends PriorityQueue setsThatShareSameDocCount = frequentItemsByCount.get(docCount); - if (setsThatShareSameDocCount != null) { - for (FrequentItemSetCandidate otherSet : setsThatShareSameDocCount) { - if (otherSet.size() < itemSet.length) { - continue; - } - - // quick, intrinsic optimized prefix matching - int commonPrefix = Arrays.mismatch(otherSet.items.longs, 0, otherSet.items.longs.length, itemSet.longs, 0, itemSet.length); + private boolean hasSuperSet(ItemSetBitSet itemSetBitSet, long docCount) { + List setsThatShareSameDocCountBits = frequentItemsByCount.get(docCount); - if (commonPrefix == -1 || commonPrefix == itemSet.length) { + if (setsThatShareSameDocCountBits != null) { + for (FrequentItemSetCandidate otherSet : setsThatShareSameDocCountBits) { + if (itemSetBitSet.isSubset(otherSet.getItems())) { return true; } - - int pos = commonPrefix; - int posOther = commonPrefix; - - while (otherSet.size() - posOther >= itemSet.length - pos) { - if (otherSet.items.longs[posOther++] == itemSet.longs[pos]) { - pos++; - if (pos == itemSet.length) { - return true; - } - } - } } } + return false; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java new file mode 100644 index 0000000000000..9a87fad024101 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.aggs.frequentitemsets; + +import org.apache.lucene.util.ArrayUtil; + +import java.util.Arrays; + +/** + * Custom implementation of a bitset for fast item set deduplication. + * + * Unfortunately other {@code BitSet} implementation, e.g. java.util, + * lack a subset check. + * + * For this implementation I took the code from {@code BitSet}, removed + * unnecessary parts and added additional functionality like the subset check. + * Cardinality - the number of set bits == number of items - is used a lot. + * The original {@code BitSet} uses a scan, this implementation uses + * a counter for faster retrieval. 
+ */ +class ItemSetBitSet implements Cloneable { + + // taken from {@code BitSet} + private static final int ADDRESS_BITS_PER_WORD = 6; + private static final int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD; + + /* Used to shift left or right for a partial word mask */ + private static final long WORD_MASK = 0xffffffffffffffffL; + + private long[] words; + private transient int wordsInUse = 0; + private int cardinality = 0; + + ItemSetBitSet() { + initWords(BITS_PER_WORD); + } + + ItemSetBitSet(int nbits) { + // nbits can't be negative; size 0 is OK + if (nbits < 0) throw new NegativeArraySizeException("nbits < 0: " + nbits); + + initWords(nbits); + } + + void reset(ItemSetBitSet bitSet) { + words = ArrayUtil.grow(words, bitSet.wordsInUse); + System.arraycopy(bitSet.words, 0, this.words, 0, bitSet.wordsInUse); + this.cardinality = bitSet.cardinality; + this.wordsInUse = bitSet.wordsInUse; + } + + void set(int bitIndex) { + if (bitIndex < 0) throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + + int wordIndex = wordIndex(bitIndex); + expandTo(wordIndex); + + final long oldWord = words[wordIndex]; + words[wordIndex] |= (1L << bitIndex); // Restores invariants + + if (oldWord != words[wordIndex]) { + cardinality++; + } + } + + boolean get(int bitIndex) { + if (bitIndex < 0) throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + + int wordIndex = wordIndex(bitIndex); + return (wordIndex < wordsInUse) && ((words[wordIndex] & (1L << bitIndex)) != 0); + } + + void clear(int bitIndex) { + if (bitIndex < 0) throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + + int wordIndex = wordIndex(bitIndex); + if (wordIndex >= wordsInUse) return; + + final long oldWord = words[wordIndex]; + + words[wordIndex] &= ~(1L << bitIndex); + if (oldWord != words[wordIndex]) { + cardinality--; + } + recalculateWordsInUse(); + } + + /** + * Returns true if the specified {@code ItemBitSet} is a subset of this + * set. + * + * @param set {@code ItemBitSet} to check + * @return true if the given set is a subset of this set + */ + boolean isSubset(ItemSetBitSet set) { + if (wordsInUse > set.wordsInUse) { + return false; + } + + for (int i = wordsInUse - 1; i >= 0; i--) + if ((words[i] & set.words[i]) != words[i]) return false; + + return true; + } + + int nextSetBit(int fromIndex) { + if (fromIndex < 0) throw new IndexOutOfBoundsException("fromIndex < 0: " + fromIndex); + + int u = wordIndex(fromIndex); + if (u >= wordsInUse) return -1; + + long word = words[u] & (WORD_MASK << fromIndex); + + while (true) { + if (word != 0) return (u * BITS_PER_WORD) + Long.numberOfTrailingZeros(word); + if (++u == wordsInUse) return -1; + word = words[u]; + } + } + + int cardinality() { + return cardinality; + } + + public static int compare(ItemSetBitSet a, ItemSetBitSet b) { + if (a.cardinality != b.cardinality) { + return a.cardinality > b.cardinality ? 1 : -1; + } + + if (a.wordsInUse != b.wordsInUse) { + return a.wordsInUse < b.wordsInUse ? 1 : -1; + } + + int i = Arrays.mismatch(a.words, 0, a.wordsInUse, b.words, 0, b.wordsInUse); + + if (i == -1) { + return 0; + } + + return a.words[i] < b.words[i] ? 
1 : -1; + } + + @Override + public Object clone() { + trimToSize(); + + try { + ItemSetBitSet result = (ItemSetBitSet) super.clone(); + result.words = words.clone(); + return result; + } catch (CloneNotSupportedException e) { + throw new InternalError(e); + } + } + + @Override + public String toString() { + final int MAX_INITIAL_CAPACITY = Integer.MAX_VALUE - 8; + int numBits = wordsInUse * BITS_PER_WORD; + // Avoid overflow in the case of a humongous numBits + int initialCapacity = (numBits <= (MAX_INITIAL_CAPACITY - 2) / 6) ? 6 * numBits + 2 : MAX_INITIAL_CAPACITY; + StringBuilder b = new StringBuilder(initialCapacity); + + for (int i = 0; i < wordsInUse; ++i) { + b.append(words[i]); + b.append(" "); + } + + return b.toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + if (this == obj) { + return true; + } + + final ItemSetBitSet set = (ItemSetBitSet) obj; + if (wordsInUse != set.wordsInUse) return false; + + // Check words in use by both BitSets + for (int i = 0; i < wordsInUse; i++) + if (words[i] != set.words[i]) return false; + + return true; + } + + @Override + public int hashCode() { + // Arrays.hashCode does not support subarrays + int result = 1; + for (int i = 0; i < wordsInUse; i++) { + int elementHash = (int) (words[i] ^ (words[i] >>> 32)); + result = 31 * result + elementHash; + } + + return result; + } + + private void trimToSize() { + if (wordsInUse != words.length) { + words = Arrays.copyOf(words, wordsInUse); + } + } + + private void initWords(int nbits) { + words = new long[wordIndex(nbits - 1) + 1]; + } + + private void recalculateWordsInUse() { + // Traverse the bitset until a used word is found + int i; + for (i = wordsInUse - 1; i >= 0; i--) + if (words[i] != 0) break; + + wordsInUse = i + 1; // The new logical size + } + + private void expandTo(int wordIndex) { + int wordsRequired = wordIndex + 1; + if (wordsInUse < wordsRequired) { + words = ArrayUtil.grow(words, wordsRequired); + wordsInUse = wordsRequired; + } + } + + private static int wordIndex(int bitIndex) { + return bitIndex >> ADDRESS_BITS_PER_WORD; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverser.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverser.java index ee3e6479de404..41d8c43fd4ffd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverser.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverser.java @@ -7,9 +7,9 @@ package org.elasticsearch.xpack.ml.aggs.frequentitemsets; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.LongsRef; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; import java.util.ArrayList; import java.util.List; @@ -27,7 +27,7 @@ * Note: In order to avoid churn, the traverser is reusing objects as much as it can, * see the comments containing the non-optimized code */ -class ItemSetTraverser implements Releasable { +class ItemSetTraverser { // start size and size increment for array holding items private static final int SIZE_INCREMENT = 100; @@ -39,12 +39,21 @@ class ItemSetTraverser implements Releasable { private final List itemIterators = new ArrayList<>(); private LongsRef itemIdStack = new LongsRef(SIZE_INCREMENT); + private final ItemSetBitSet itemPositionsVector; + 
private final ItemSetBitSet itemPositionsVectorParent; + private IntsRef itemPositionsStack = new IntsRef(SIZE_INCREMENT); + private int stackPosition = 0; ItemSetTraverser(TransactionStore.TopItemIds topItemIds) { this.topItemIds = topItemIds; // push the first iterator itemIterators.add(topItemIds.iterator()); + + // create a bit vector that corresponds to the number of items + itemPositionsVector = new ItemSetBitSet((int) topItemIds.size()); + // create a bit vector that corresponds to the item set + itemPositionsVectorParent = new ItemSetBitSet((int) topItemIds.size()); } /** @@ -81,25 +90,33 @@ public boolean next() { return false; } itemIdStack.length--; + itemPositionsStack.length--; + itemPositionsVectorParent.clear(itemPositionsStack.ints[itemPositionsStack.length]); + itemPositionsVector.clear(itemPositionsStack.ints[itemPositionsStack.length]); } } // push a new iterator on the stack + + int itemPosition = itemIterators.get(stackPosition).getIndex(); // non-optimized: itemIterators.add(topItemIds.iterator(itemIteratorStack.peek().getIndex())); if (itemIterators.size() == stackPosition + 1) { - itemIterators.add(topItemIds.iterator(itemIterators.get(stackPosition).getIndex())); + itemIterators.add(topItemIds.iterator(itemPosition)); } else { - itemIterators.get(stackPosition + 1).reset(itemIterators.get(stackPosition).getIndex()); + itemIterators.get(stackPosition + 1).reset(itemPosition); } - if (itemIdStack.longs.length == itemIdStack.length) { - LongsRef resizedItemIdStack2 = new LongsRef(itemIdStack.length + SIZE_INCREMENT); - System.arraycopy(itemIdStack.longs, 0, resizedItemIdStack2.longs, 0, itemIdStack.length); - resizedItemIdStack2.length = itemIdStack.length; - itemIdStack = resizedItemIdStack2; + growStacksIfNecessary(); + itemIdStack.longs[itemIdStack.length++] = itemId; + + // set the position from the previous step + if (itemPositionsStack.length > 0) { + itemPositionsVectorParent.set(itemPositionsStack.ints[itemPositionsStack.length - 1]); } - itemIdStack.longs[itemIdStack.length++] = itemId; + // set the position from the this step + itemPositionsStack.ints[itemPositionsStack.length++] = itemPosition; + itemPositionsVector.set(itemPosition); ++stackPosition; return true; @@ -113,6 +130,14 @@ public LongsRef getItemSet() { return itemIdStack; } + public ItemSetBitSet getItemSetBitSet() { + return itemPositionsVector; + } + + public ItemSetBitSet getParentItemSetBitSet() { + return itemPositionsVectorParent; + } + public int getNumberOfItems() { return stackPosition; } @@ -132,11 +157,19 @@ public void prune() { return; } itemIdStack.length--; + itemPositionsStack.length--; + itemPositionsVectorParent.clear(itemPositionsStack.ints[itemPositionsStack.length]); + itemPositionsVector.clear(itemPositionsStack.ints[itemPositionsStack.length]); } - @Override - public void close() { - Releasables.close(topItemIds); + private void growStacksIfNecessary() { + if (itemIdStack.longs.length == itemIdStack.length) { + itemIdStack.longs = ArrayUtil.grow(itemIdStack.longs, itemIdStack.length + SIZE_INCREMENT); + } + + if (itemPositionsStack.ints.length == itemPositionsStack.length) { + itemPositionsStack.ints = ArrayUtil.grow(itemPositionsStack.ints, itemPositionsStack.length + SIZE_INCREMENT); + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java index 0269abe47b628..5a4b48dc1c53c 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java @@ -86,6 +86,10 @@ public IdIterator iterator(int startIndex) { return new IdIterator(startIndex); } + public long getItemIdAt(long index) { + return sortedItems.get(index); + } + public long size() { return sortedItems.size(); } @@ -346,15 +350,6 @@ public TopItemIds getTopItemIds() { return getTopItemIds(getItems().size()); } - /** - * Get a traverser object to traverse top items - * - * @return a top item traverser - */ - public ItemSetTraverser getTopItemIdTraverser() { - return new ItemSetTraverser(getTopItemIds()); - } - /** * Check if a transaction specified by id contains the item * diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollectorTests.java index cd759af3559db..671f7b3a7c07f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollectorTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ml.aggs.frequentitemsets; -import org.apache.lucene.util.LongsRef; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -16,6 +15,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.FrequentItemSetCollector.FrequentItemSetPriorityQueue; +import org.elasticsearch.xpack.ml.aggs.frequentitemsets.TransactionStore.TopItemIds; import org.junit.After; import java.io.IOException; @@ -38,141 +38,148 @@ public void closeReleasables() throws IOException { public void testQueue() { transactionStore = new HashBasedTransactionStore(mockBigArrays()); - FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, 5, Long.MAX_VALUE); + try (TopItemIds topItemIds = transactionStore.getTopItemIds();) { + FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, topItemIds, 5, Long.MAX_VALUE); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(1L, 2L, 3L, 4L), 10L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(5L, 6L, 7L, 8L), 11L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(11L, 12L, 13L, 14L), 9L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(21L, 2L, 3L, 4L), 13L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 1L, 2L, 3L, 4L }, 10L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 5L, 6L, 7L, 8L }, 11L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 11L, 12L, 13L, 14L }, 9L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 21L, 2L, 3L, 4L }, 13L)); - // queue should be full, drop weakest element - assertEquals(9L, collector.add(longsRef(31L, 2L, 3L, 4L), 14L)); - assertEquals(10L, collector.add(longsRef(41L, 2L, 3L, 4L), 15L)); - assertEquals(11L, collector.add(longsRef(51L, 2L, 3L, 4L), 16L)); + // queue should be full, drop weakest element + assertEquals(9L, addToCollector(collector, new long[] { 31L, 2L, 3L, 4L }, 14L)); + assertEquals(10L, 
addToCollector(collector, new long[] { 41L, 2L, 3L, 4L }, 15L)); + assertEquals(11L, addToCollector(collector, new long[] { 51L, 2L, 3L, 4L }, 16L)); - // check that internal data has been removed as well - assertEquals(5, collector.getFrequentItemsByCount().size()); + // check that internal data has been removed as well + assertEquals(5, collector.getFrequentItemsByCount().size()); - // fill slots with same doc count - assertEquals(13L, collector.add(longsRef(61L, 2L, 3L, 4L), 20L)); - assertEquals(14L, collector.add(longsRef(71L, 2L, 3L, 4L), 20L)); - assertEquals(15L, collector.add(longsRef(81L, 2L, 3L, 4L), 20L)); - assertEquals(16L, collector.add(longsRef(91L, 2L, 3L, 4L), 20L)); - assertEquals(20L, collector.add(longsRef(101L, 2L, 3L, 4L), 20L)); + // fill slots with same doc count + assertEquals(13L, addToCollector(collector, new long[] { 61L, 2L, 3L, 4L }, 20L)); + assertEquals(14L, addToCollector(collector, new long[] { 71L, 2L, 3L, 4L }, 20L)); + assertEquals(15L, addToCollector(collector, new long[] { 81L, 2L, 3L, 4L }, 20L)); + assertEquals(16L, addToCollector(collector, new long[] { 91L, 2L, 3L, 4L }, 20L)); + assertEquals(20L, addToCollector(collector, new long[] { 101L, 2L, 3L, 4L }, 20L)); - // check that internal map has only 1 key - assertEquals(1, collector.getFrequentItemsByCount().size()); + // check that internal map has only 1 key + assertEquals(1, collector.getFrequentItemsByCount().size()); - // ignore set below current weakest one - assertEquals(20L, collector.add(longsRef(111L, 2L, 3L, 4L), 1L)); + // ignore set below current weakest one + assertEquals(20L, addToCollector(collector, new long[] { 111L, 2L, 3L, 4L }, 1L)); - FrequentItemSetPriorityQueue queue = collector.getQueue(); + FrequentItemSetPriorityQueue queue = collector.getQueue(); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 61L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 71L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 81L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 91L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 101L, 2L, 3L, 4L })); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 101L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 91L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 81L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 71L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 61L, 2L, 3L, 4L }))); - assertEquals(0, collector.size()); + assertEquals(0, collector.size()); + } } public void testClosedSetSkipping() { transactionStore = new HashBasedTransactionStore(mockBigArrays()); - FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, 5, Long.MAX_VALUE); + try (TopItemIds topItemIds = transactionStore.getTopItemIds();) { + FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, topItemIds, 5, Long.MAX_VALUE); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(1L, 2L, 3L, 4L), 10L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(5L, 6L, 7L, 8L), 11L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(11L, 12L, 13L, 14L), 12L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(21L, 2L, 3L, 4L), 13L)); + assertEquals(Long.MAX_VALUE, 
addToCollector(collector, new long[] { 1L, 2L, 3L, 4L }, 10L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 5L, 6L, 7L, 8L }, 11L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 11L, 12L, 13L, 14L }, 12L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 21L, 2L, 3L, 4L }, 13L)); - // add a subset of the 1st entry, it should be ignored - assertEquals(Long.MAX_VALUE, collector.add(longsRef(1L, 2L, 3L), 10L)); + // add a subset of the 1st entry, it should be ignored + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 1L, 2L, 3L }, 10L)); - // fill slots with same doc count - assertEquals(10L, collector.add(longsRef(61L, 2L, 3L, 4L), 20L)); - assertEquals(11L, collector.add(longsRef(71L, 2L, 3L, 4L), 20L)); - assertEquals(12L, collector.add(longsRef(81L, 2L, 3L, 4L), 20L)); - assertEquals(13L, collector.add(longsRef(91L, 2L, 3L, 4L), 20L)); + // fill slots with same doc count + assertEquals(10L, addToCollector(collector, new long[] { 61L, 2L, 3L, 4L }, 20L)); + assertEquals(11L, addToCollector(collector, new long[] { 71L, 2L, 3L, 4L }, 20L)); + assertEquals(12L, addToCollector(collector, new long[] { 81L, 2L, 3L, 4L }, 20L)); + assertEquals(13L, addToCollector(collector, new long[] { 91L, 2L, 3L, 4L }, 20L)); - // add a subset of an entry, it should be ignored - assertEquals(13L, collector.add(longsRef(81L, 2L, 4L), 20L)); + // add a subset of an entry, it should be ignored + assertEquals(13L, addToCollector(collector, new long[] { 81L, 2L, 4L }, 20L)); - FrequentItemSetPriorityQueue queue = collector.getQueue(); + FrequentItemSetPriorityQueue queue = collector.getQueue(); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 21L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 61L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 71L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 81L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 91L, 2L, 3L, 4L })); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 21L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 91L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 81L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 71L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 61L, 2L, 3L, 4L }))); - assertEquals(0, collector.size()); + assertEquals(0, collector.size()); + } } public void testCopyOnAdd() { transactionStore = new HashBasedTransactionStore(mockBigArrays()); + try (TopItemIds topItemIds = transactionStore.getTopItemIds();) { + FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, topItemIds, 5, Long.MAX_VALUE); + long[] itemSet = new long[] { 1L, 2L, 3L, 4L, 5L }; - FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, 5, Long.MAX_VALUE); - LongsRef itemSet = longsRef(1L, 2L, 3L, 4L, 5L); + assertEquals(Long.MAX_VALUE, addToCollector(collector, itemSet, 10L)); + itemSet[0] = 42L; + itemSet[4] = 42L; - assertEquals(Long.MAX_VALUE, collector.add(itemSet, 10L)); - itemSet.longs[0] = 42L; - itemSet.longs[4] = 42L; + FrequentItemSetPriorityQueue queue = collector.getQueue(); - FrequentItemSetPriorityQueue queue = collector.getQueue(); - - 
assertThat(queue.pop().getItems().longs, equalTo(new long[] { 1L, 2L, 3L, 4L, 5L })); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 1L, 2L, 3L, 4L, 5L }))); + } } public void testLargerItemSetsPreference() { transactionStore = new HashBasedTransactionStore(mockBigArrays()); + try (TopItemIds topItemIds = transactionStore.getTopItemIds();) { + FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, topItemIds, 5, Long.MAX_VALUE); - FrequentItemSetCollector collector = new FrequentItemSetCollector(transactionStore, 5, Long.MAX_VALUE); - - assertEquals(Long.MAX_VALUE, collector.add(longsRef(1L, 2L, 3L, 4L), 10L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(5L, 6L, 7L, 8L), 11L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(11L, 12L, 13L, 14L), 9L)); - assertEquals(Long.MAX_VALUE, collector.add(longsRef(21L, 2L, 3L, 4L), 13L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 1L, 2L, 3L, 4L }, 10L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 5L, 6L, 7L, 8L }, 11L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 11L, 12L, 13L, 14L }, 9L)); + assertEquals(Long.MAX_VALUE, addToCollector(collector, new long[] { 21L, 2L, 3L, 4L }, 13L)); - // queue should be full, drop weakest element - assertEquals(9L, collector.add(longsRef(31L, 2L, 3L, 4L), 14L)); + // queue should be full, drop weakest element + assertEquals(9L, addToCollector(collector, new long[] { 31L, 2L, 3L, 4L }, 14L)); - assertEquals(9L, collector.getLastSet().getDocCount()); - assertEquals(4, collector.getLastSet().size()); + assertEquals(9L, collector.getLastSet().getDocCount()); + assertEquals(4, collector.getLastSet().size()); - // ignore set with same doc count but fewer items - assertEquals(9L, collector.add(longsRef(22L, 23L, 24L), 9L)); + // ignore set with same doc count but fewer items + assertEquals(9L, addToCollector(collector, new long[] { 22L, 23L, 24L }, 9L)); - assertEquals(9L, collector.getLastSet().getDocCount()); - assertEquals(4, collector.getLastSet().size()); + assertEquals(9L, collector.getLastSet().getDocCount()); + assertEquals(4, collector.getLastSet().size()); - // take set with same doc count but more items - assertEquals(9L, collector.add(longsRef(25L, 26L, 27L, 28L, 29L), 9L)); + // take set with same doc count but more items + assertEquals(9L, addToCollector(collector, new long[] { 25L, 26L, 27L, 28L, 29L }, 9L)); - assertEquals(9L, collector.getLastSet().getDocCount()); - assertEquals(5, collector.getLastSet().size()); + assertEquals(9L, collector.getLastSet().getDocCount()); + assertEquals(5, collector.getLastSet().size()); - FrequentItemSetPriorityQueue queue = collector.getQueue(); + FrequentItemSetPriorityQueue queue = collector.getQueue(); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 25L, 26L, 27L, 28L, 29L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 1L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 5L, 6L, 7L, 8L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 21L, 2L, 3L, 4L })); - assertThat(queue.pop().getItems().longs, equalTo(new long[] { 31L, 2L, 3L, 4L })); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 25L, 26L, 27L, 28L, 29L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 1L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 5L, 6L, 
7L, 8L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 21L, 2L, 3L, 4L }))); + assertThat(queue.pop().getItems(), equalTo(createItemSetBitSet(new long[] { 31L, 2L, 3L, 4L }))); - assertEquals(0, collector.size()); + assertEquals(0, collector.size()); + } } - private static LongsRef longsRef(long l1, long l2, long l3) { - return new LongsRef(new long[] { l1, l2, l3 }, 0, 3); - } + private static ItemSetBitSet createItemSetBitSet(long[] longs) { + ItemSetBitSet itemsAsBitVector = new ItemSetBitSet(); + for (int i = 0; i < longs.length; ++i) { + itemsAsBitVector.set((int) longs[i]); + } - private static LongsRef longsRef(long l1, long l2, long l3, long l4) { - return new LongsRef(new long[] { l1, l2, l3, l4 }, 0, 4); + return itemsAsBitVector; } - private static LongsRef longsRef(long l1, long l2, long l3, long l4, long l5) { - return new LongsRef(new long[] { l1, l2, l3, l4, l5 }, 0, 5); + private static long addToCollector(FrequentItemSetCollector collector, long[] longsRef, long docCount) { + return collector.add(createItemSetBitSet(longsRef), docCount); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java new file mode 100644 index 0000000000000..b70775391f122 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java @@ -0,0 +1,242 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.aggs.frequentitemsets; + +import org.elasticsearch.test.ESTestCase; + +public class ItemSetBitSetTests extends ESTestCase { + + public void testBasics() { + ItemSetBitSet set = new ItemSetBitSet(); + set.set(0); + set.set(3); + set.set(200); + set.set(5); + set.set(65); + + assertEquals(5, set.cardinality()); + assertTrue(set.get(0)); + assertFalse(set.get(1)); + assertFalse(set.get(2)); + assertTrue(set.get(3)); + assertTrue(set.get(5)); + assertFalse(set.get(64)); + assertTrue(set.get(65)); + assertTrue(set.get(200)); + + set.clear(0); + set.clear(65); + set.clear(5); + assertEquals(2, set.cardinality()); + + assertFalse(set.get(0)); + assertFalse(set.get(1)); + assertFalse(set.get(2)); + assertTrue(set.get(3)); + assertFalse(set.get(5)); + assertFalse(set.get(64)); + assertFalse(set.get(65)); + assertTrue(set.get(200)); + } + + public void testIsSubSet() { + ItemSetBitSet set1 = new ItemSetBitSet(); + set1.set(0); + set1.set(3); + set1.set(200); + set1.set(5); + set1.set(65); + + assertEquals(5, set1.cardinality()); + ItemSetBitSet set2 = new ItemSetBitSet(); + set2.set(3); + set2.set(200); + set2.set(65); + + assertEquals(3, set2.cardinality()); + assertTrue(set2.isSubset(set1)); + assertFalse(set1.isSubset(set2)); + assertTrue(set1.isSubset(set1)); + + set2.set(0); + set2.set(5); + assertTrue(set2.isSubset(set1)); + assertTrue(set1.isSubset(set2)); + + set2.set(99); + assertFalse(set2.isSubset(set1)); + assertTrue(set1.isSubset(set2)); + + set1.set(999); + assertFalse(set1.isSubset(set2)); + set2.set(999); + assertTrue(set1.isSubset(set2)); + set2.set(2222); + assertTrue(set1.isSubset(set2)); + } + + public void testClone() { + ItemSetBitSet set1 = new ItemSetBitSet(); + set1.set(0); + set1.set(3); + set1.set(200); + set1.set(5); + set1.set(65); + + ItemSetBitSet set2 = (ItemSetBitSet) set1.clone(); + assertEquals(5, set2.cardinality()); + + assertTrue(set2.get(0)); + assertFalse(set2.get(1)); + assertFalse(set2.get(2)); + assertTrue(set2.get(3)); + assertTrue(set2.get(5)); + assertFalse(set2.get(64)); + assertTrue(set2.get(65)); + assertTrue(set2.get(200)); + + set1.clear(200); + assertTrue(set2.get(200)); + + set1.set(42); + assertTrue(set1.get(42)); + assertFalse(set2.get(42)); + } + + public void testReset() { + ItemSetBitSet set1 = new ItemSetBitSet(); + set1.set(0); + set1.set(3); + set1.set(200); + set1.set(5); + set1.set(65); + + ItemSetBitSet set2 = new ItemSetBitSet(); + set2.reset(set1); + assertEquals(set1, set2); + assertEquals(set1.cardinality(), set2.cardinality()); + + assertTrue(set2.get(0)); + assertFalse(set2.get(1)); + assertFalse(set2.get(2)); + assertTrue(set2.get(3)); + assertTrue(set2.get(5)); + assertFalse(set2.get(64)); + assertTrue(set2.get(65)); + assertTrue(set2.get(200)); + + set1.clear(200); + assertTrue(set2.get(200)); + + set1.set(42); + assertTrue(set1.get(42)); + assertFalse(set2.get(42)); + + set2.set(99999999); + assertTrue(set2.get(99999999)); + + ItemSetBitSet set3 = new ItemSetBitSet(); + set3.set(2); + set3.set(9); + set2.reset(set3); + + assertEquals(set3, set2); + assertFalse(set2.get(99999999)); + } + + public void testCardinality() { + ItemSetBitSet set = new ItemSetBitSet(); + set.set(0); + set.set(3); + set.set(200); + set.set(5); + set.set(65); + + assertEquals(5, set.cardinality()); + set.clear(1); + assertEquals(5, set.cardinality()); + set.clear(200); + assertEquals(4, set.cardinality()); + set.set(204); + set.set(204); + set.set(204); + set.set(204); + assertEquals(5, set.cardinality()); + 
ItemSetBitSet set2 = new ItemSetBitSet(); + set.reset(set2); + assertEquals(0, set.cardinality()); + set.clear(999); + } + + public void testHashCode() { + ItemSetBitSet set1 = new ItemSetBitSet(); + set1.set(0); + set1.set(3); + set1.set(200); + set1.set(5); + set1.set(65); + + ItemSetBitSet set2 = new ItemSetBitSet(); + set2.reset(set1); + + assertEquals(set1.hashCode(), set2.hashCode()); + set2.set(99999999); + assertNotEquals(set1.hashCode(), set2.hashCode()); + set2.clear(99999999); + assertEquals(set1.hashCode(), set2.hashCode()); + } + + public void testCompare() { + ItemSetBitSet set1 = new ItemSetBitSet(); + set1.set(0); + set1.set(3); + ItemSetBitSet set2 = new ItemSetBitSet(); + set2.set(0); + + assertEquals(1, ItemSetBitSet.compare(set1, set2)); + assertEquals(-1, ItemSetBitSet.compare(set2, set1)); + + set2.set(3); + assertEquals(0, ItemSetBitSet.compare(set2, set1)); + set1.set(4); + set2.set(5); + + assertEquals(1, ItemSetBitSet.compare(set1, set2)); + set1.set(6); + set2.set(6); + assertEquals(1, ItemSetBitSet.compare(set1, set2)); + set1.set(7); + set2.set(8); + assertEquals(1, ItemSetBitSet.compare(set1, set2)); + + ItemSetBitSet set3 = new ItemSetBitSet(); + set3.set(2); + set3.set(3); + set3.set(4); + ItemSetBitSet set4 = new ItemSetBitSet(); + set4.set(2); + set4.set(3); + set4.set(4); + + set3.set(71); + set4.set(101); + assertEquals(1, ItemSetBitSet.compare(set3, set4)); + assertEquals(-1, ItemSetBitSet.compare(set4, set3)); + + set3.set(61); + assertEquals(1, ItemSetBitSet.compare(set3, set4)); + assertEquals(-1, ItemSetBitSet.compare(set4, set3)); + + set3.clear(71); + set4.set(101); + + assertEquals(1, ItemSetBitSet.compare(set3, set4)); + assertEquals(-1, ItemSetBitSet.compare(set4, set3)); + } + +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverserTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverserTests.java index d92bc65c02df4..64b142ca7e2ec 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverserTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverserTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ml.aggs.frequentitemsets.TransactionStore.TopItemIds; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.mr.ItemSetMapReduceValueSource.Field; import org.junit.After; @@ -31,11 +32,10 @@ static BigArrays mockBigArrays() { } private HashBasedTransactionStore transactionStore = null; - private ItemSetTraverser it = null; @After public void closeReleasables() throws IOException { - Releasables.close(transactionStore, it); + Releasables.close(transactionStore); } public void testIteration() throws IOException { @@ -60,99 +60,166 @@ public void testIteration() throws IOException { // we don't want to prune transactionStore.prune(0.1); - it = new ItemSetTraverser(transactionStore.getTopItemIds()); - - /** - * items are sorted by frequency: - * d:8, b:7, c:5, a:4, e:3, f:2, g:1 - * this creates the following traversal tree: - * - * 1: d-->b-->c-->a-->e-->f-->g - * 2: | | `->g - * 3: | |`->f-->g - * 4: | `->g - * 5: |`->e-->f-->g - * 6: | `->g - * 7: |`->f-->g - * 8: `->g - * ... 
- */ - - assertTrue(it.next()); - assertEquals("d", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(1, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("b", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(2, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("c", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(3, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("a", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(4, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(6, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(7, it.getNumberOfItems()); - - // branch row 2 - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(6, it.getNumberOfItems()); - - // branch row 3 - assertTrue(it.next()); - assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(6, it.getNumberOfItems()); - - // branch row 4 - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - - // branch row 5 - assertTrue(it.next()); - assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(4, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(6, it.getNumberOfItems()); - - // branch row 6 - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - - // branch row 7 - assertTrue(it.next()); - assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(4, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - - // branch row 8 - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(4, it.getNumberOfItems()); - - int furtherSteps = 0; - while (it.next()) { - ++furtherSteps; - } - assertEquals(109, furtherSteps); + try (TopItemIds topItemIds = transactionStore.getTopItemIds()) { + ItemSetTraverser it = new ItemSetTraverser(topItemIds); + + /** + * items are sorted by frequency: + * d:8, b:7, c:5, a:4, e:3, f:2, g:1 + * this creates the following traversal tree: + * + * 1: d-->b-->c-->a-->e-->f-->g + * 2: | | `->g + * 3: | |`->f-->g + * 4: | `->g + * 5: |`->e-->f-->g + * 6: | `->g + * 7: |`->f-->g + * 8: `->g + * ... 
+ * + * bit representation: + * d:1, b:2, c:3, a:4, e:5, f:6, g:7 + */ + + assertTrue(it.next()); + assertEquals("d", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(1, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(1)); + assertTrue(it.next()); + assertEquals("b", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(2, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(2)); + assertTrue(it.next()); + assertEquals("c", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(3, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(3)); + assertTrue(it.next()); + assertEquals("a", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(4, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(4)); + assertFalse(it.getParentItemSetBitSet().get(4)); + assertTrue(it.next()); + assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(5)); + assertFalse(it.getParentItemSetBitSet().get(5)); + assertTrue(it.getParentItemSetBitSet().get(4)); + assertTrue(it.next()); + assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(6, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(6)); + assertFalse(it.getParentItemSetBitSet().get(6)); + assertTrue(it.getParentItemSetBitSet().get(5)); + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(7, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(7)); + assertFalse(it.getParentItemSetBitSet().get(7)); + assertTrue(it.getParentItemSetBitSet().get(6)); + + // branch row 2 + it.next(); + // assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(6, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(7)); + assertFalse(it.getItemSetBitSet().get(6)); + assertFalse(it.getParentItemSetBitSet().get(6)); + assertFalse(it.getParentItemSetBitSet().get(7)); + + // branch row 3 + assertTrue(it.next()); + assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(6)); + assertFalse(it.getItemSetBitSet().get(5)); + assertFalse(it.getItemSetBitSet().get(7)); + assertFalse(it.getParentItemSetBitSet().get(5)); + assertFalse(it.getParentItemSetBitSet().get(6)); + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(6, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(7)); + assertTrue(it.getItemSetBitSet().get(6)); + assertFalse(it.getItemSetBitSet().get(5)); + assertTrue(it.getParentItemSetBitSet().get(6)); + assertFalse(it.getParentItemSetBitSet().get(7)); + assertFalse(it.getParentItemSetBitSet().get(5)); + + // branch row 4 + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + + // branch row 5 + assertTrue(it.next()); + assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(4, it.getNumberOfItems()); + assertTrue(it.next()); + assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(6, it.getNumberOfItems()); + + // branch row 6: "dbceg" + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); 
+ assertEquals(5, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(1)); + assertTrue(it.getItemSetBitSet().get(2)); + assertTrue(it.getItemSetBitSet().get(3)); + assertFalse(it.getItemSetBitSet().get(4)); + assertTrue(it.getItemSetBitSet().get(5)); + assertFalse(it.getItemSetBitSet().get(6)); + assertTrue(it.getItemSetBitSet().get(7)); + + assertTrue(it.getParentItemSetBitSet().get(1)); + assertTrue(it.getParentItemSetBitSet().get(2)); + assertTrue(it.getParentItemSetBitSet().get(3)); + assertFalse(it.getParentItemSetBitSet().get(4)); + assertTrue(it.getParentItemSetBitSet().get(5)); + assertFalse(it.getParentItemSetBitSet().get(6)); + assertFalse(it.getParentItemSetBitSet().get(7)); + + // branch row 7 + assertTrue(it.next()); + assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(4, it.getNumberOfItems()); + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + + // branch row 8: "dbcg" + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(4, it.getNumberOfItems()); + + assertTrue(it.getItemSetBitSet().get(1)); + assertTrue(it.getItemSetBitSet().get(2)); + assertTrue(it.getItemSetBitSet().get(3)); + assertFalse(it.getItemSetBitSet().get(4)); + assertFalse(it.getItemSetBitSet().get(5)); + assertFalse(it.getItemSetBitSet().get(6)); + assertTrue(it.getItemSetBitSet().get(7)); + + assertTrue(it.getParentItemSetBitSet().get(1)); + assertTrue(it.getParentItemSetBitSet().get(2)); + assertTrue(it.getParentItemSetBitSet().get(3)); + assertFalse(it.getParentItemSetBitSet().get(4)); + assertFalse(it.getParentItemSetBitSet().get(5)); + assertFalse(it.getParentItemSetBitSet().get(6)); + assertFalse(it.getParentItemSetBitSet().get(7)); + + int furtherSteps = 0; + while (it.next()) { + ++furtherSteps; + } + + assertEquals(109, furtherSteps); + } } public void testPruning() throws IOException { @@ -177,92 +244,133 @@ public void testPruning() throws IOException { // we don't want to prune transactionStore.prune(0.1); - it = new ItemSetTraverser(transactionStore.getTopItemIds()); - - /** - * items are sorted by frequency: - * d:8, b:7, c:5, a:4, e:3, f:2, g:1 - * this creates the following traversal tree: - * - * this item we prune the tree in various places marked with "[", "]" - * - * 1: d-->b-->c-->a-->e[-->f-->g ] - * 2: | | [`->g ] - * 3: | |`->f-->g - * 4: | `->g - * 5: |`->e-->f-->g - * 6: | `->g - * 7: |`->f-->g - * 8: `->g - * ... 
- */ - - assertTrue(it.next()); - assertEquals("d", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(1, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("b", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(2, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("c", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(3, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("a", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(4, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - - // now prune the tree - it.prune(); - - // branch row 3 - assertTrue(it.next()); - assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(6, it.getNumberOfItems()); - - // prune, which actually is ineffective, as we would go up anyway - it.prune(); - - // branch row 4 - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - - // branch row 5 - assertTrue(it.next()); - assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(4, it.getNumberOfItems()); - - // prune - it.prune(); - - // branch row 7 - assertTrue(it.next()); - assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(4, it.getNumberOfItems()); - assertTrue(it.next()); - assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); - assertEquals(5, it.getNumberOfItems()); - - // prune aggressively - it.prune(); - it.prune(); - it.prune(); - it.prune(); - it.prune(); - it.prune(); - it.prune(); - - int furtherSteps = 0; - while (it.next()) { - ++furtherSteps; + try (TopItemIds topItemIds = transactionStore.getTopItemIds()) { + ItemSetTraverser it = new ItemSetTraverser(topItemIds); + + /** + * items are sorted by frequency: + * d:8, b:7, c:5, a:4, e:3, f:2, g:1 + * this creates the following traversal tree: + * + * this item we prune the tree in various places marked with "[", "]" + * + * 1: d-->b-->c-->a-->e[-->f-->g ] + * 2: | | [`->g ] + * 3: | |`->f-->g + * 4: | `->g + * 5: |`->e-->f-->g + * 6: | `->g + * 7: |`->f-->g + * 8: `->g + * ... 
+ * + * bit representation: + * d:1, b:2, c:3, a:4, e:5, f:6, g:7 + */ + + assertTrue(it.next()); + assertEquals("d", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(1, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(1)); + assertTrue(it.next()); + assertEquals("b", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(2, it.getNumberOfItems()); + assertTrue(it.next()); + assertEquals("c", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(3, it.getNumberOfItems()); + assertTrue(it.next()); + assertEquals("a", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(4, it.getNumberOfItems()); + assertTrue(it.next()); + assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(1)); + assertTrue(it.getItemSetBitSet().get(2)); + assertTrue(it.getItemSetBitSet().get(3)); + assertTrue(it.getItemSetBitSet().get(4)); + assertTrue(it.getItemSetBitSet().get(5)); + assertTrue(it.getParentItemSetBitSet().get(1)); + assertTrue(it.getParentItemSetBitSet().get(2)); + assertTrue(it.getParentItemSetBitSet().get(3)); + assertTrue(it.getParentItemSetBitSet().get(4)); + assertFalse(it.getParentItemSetBitSet().get(5)); + + // now prune the tree + it.prune(); + + // branch row 3 + assertTrue(it.next()); + assertTrue(it.getItemSetBitSet().get(1)); + assertTrue(it.getItemSetBitSet().get(2)); + assertTrue(it.getItemSetBitSet().get(3)); + assertTrue(it.getItemSetBitSet().get(4)); + assertFalse(it.getItemSetBitSet().get(5)); + assertTrue(it.getItemSetBitSet().get(6)); + assertTrue(it.getParentItemSetBitSet().get(1)); + assertTrue(it.getParentItemSetBitSet().get(2)); + assertTrue(it.getParentItemSetBitSet().get(3)); + assertTrue(it.getParentItemSetBitSet().get(4)); + assertFalse(it.getParentItemSetBitSet().get(5)); + assertFalse(it.getParentItemSetBitSet().get(6)); + assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(6, it.getNumberOfItems()); + + // prune, which actually is ineffective, as we would go up anyway + it.prune(); + + // branch row 4 + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + + // branch row 5 + assertTrue(it.next()); + assertEquals("e", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(4, it.getNumberOfItems()); + + // prune + it.prune(); + + // branch row 7 + assertTrue(it.next()); + assertEquals("f", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(4, it.getNumberOfItems()); + assertTrue(it.getItemSetBitSet().get(1)); + assertTrue(it.getItemSetBitSet().get(2)); + assertTrue(it.getItemSetBitSet().get(3)); + assertFalse(it.getItemSetBitSet().get(4)); + assertFalse(it.getItemSetBitSet().get(5)); + assertTrue(it.getItemSetBitSet().get(6)); + + assertTrue(it.getParentItemSetBitSet().get(1)); + assertTrue(it.getParentItemSetBitSet().get(2)); + assertTrue(it.getParentItemSetBitSet().get(3)); + assertFalse(it.getParentItemSetBitSet().get(4)); + assertFalse(it.getParentItemSetBitSet().get(5)); + assertFalse(it.getParentItemSetBitSet().get(6)); + assertTrue(it.next()); + assertEquals("g", transactionStore.getItem(it.getItemId()).v2()); + assertEquals(5, it.getNumberOfItems()); + + // prune aggressively + it.prune(); + it.prune(); + it.prune(); + it.prune(); + it.prune(); + it.prune(); + 
it.prune(); + + int furtherSteps = 0; + while (it.next()) { + ++furtherSteps; + } + + assertEquals(0, furtherSteps); } - - assertEquals(0, furtherSteps); } } From 70a7276d77ce74847da50c74acd37584d4d459b2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 1 Aug 2022 16:06:43 +0200 Subject: [PATCH 030/265] Avoid expensive loop in indicesDeletedFromClusterState() when possible (#88986) The loop over all indices here gets very expensive for large states, we can avoid it often when metadata changes but not the indices maps. --- .../elasticsearch/cluster/ClusterChangedEvent.java | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index 0a67de9c264a1..b400269265224 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -247,13 +247,15 @@ private List indicesDeletedFromClusterState() { final Metadata previousMetadata = previousState.metadata(); final Metadata currentMetadata = state.metadata(); - for (IndexMetadata index : previousMetadata.indices().values()) { - IndexMetadata current = currentMetadata.index(index.getIndex()); - if (current == null) { - if (deleted == null) { - deleted = new HashSet<>(); + if (currentMetadata.indices() != previousMetadata.indices()) { + for (IndexMetadata index : previousMetadata.indices().values()) { + IndexMetadata current = currentMetadata.index(index.getIndex()); + if (current == null) { + if (deleted == null) { + deleted = new HashSet<>(); + } + deleted.add(index.getIndex()); } - deleted.add(index.getIndex()); } } From 83136efd20fc061addbc34840261de35566fc741 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 1 Aug 2022 10:06:52 -0400 Subject: [PATCH 031/265] [ML] address potential bug where trained models get stuck in starting after being allocated to node (#88945) When a model is starting, it has been rarely observed that it will lock up while trying to restore the model objects to the native process. This would manifest as a trained model being stuck in "starting" while also being assigned to a node. So, there is a native process started and task available on the assigned nodes, but the model state never gets out of "starting". 
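As a rough sketch of the threading idea behind the fix (all names below are hypothetical and simplified, not the actual `DeploymentManager` or `ChunkedTrainedModelRestorer` code): the blocking restore step is handed off to a dedicated executor instead of running directly on whatever callback thread delivered the search response (which, as the code comments note, may be a network thread), and the worker asserts which thread it is on before it blocks.

```java
// Simplified illustration only: hop off the callback thread before doing blocking work.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class RestoreThreadingSketch {

    private static final String UTILITY_THREAD_NAME = "ml-utility";

    private final ExecutorService utilityExecutor =
        Executors.newSingleThreadExecutor(r -> new Thread(r, UTILITY_THREAD_NAME));

    // Called back on an arbitrary (possibly network) thread; must not block here.
    void onModelMetadataLoaded(Runnable blockingRestoreStep) {
        utilityExecutor.execute(() -> {
            assert Thread.currentThread().getName().contains(UTILITY_THREAD_NAME)
                : "restore must not run on " + Thread.currentThread().getName();
            blockingRestoreStep.run(); // safe to block on this dedicated thread
        });
    }

    public static void main(String[] args) {
        RestoreThreadingSketch sketch = new RestoreThreadingSketch();
        sketch.onModelMetadataLoaded(() -> System.out.println("restoring model state..."));
        sketch.utilityExecutor.shutdown();
    }
}
```

Running the blocking search/restore work on a thread pool the node can afford to block, and asserting the thread name up front, is what removes the opportunity for the lock-up described above.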
--- docs/changelog/88945.yaml | 6 ++ .../TrainedModelAssignmentNodeService.java | 7 +- .../deployment/DeploymentManager.java | 73 +++++++++++-------- .../ChunkedTrainedModelRestorer.java | 27 ++++--- 4 files changed, 71 insertions(+), 42 deletions(-) create mode 100644 docs/changelog/88945.yaml diff --git a/docs/changelog/88945.yaml b/docs/changelog/88945.yaml new file mode 100644 index 0000000000000..a6cb5ed952d6d --- /dev/null +++ b/docs/changelog/88945.yaml @@ -0,0 +1,6 @@ +pr: 88945 +summary: Address potential bug where trained models get stuck in starting after being + allocated to node +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 1d48f1d1f2297..8c46427f6d249 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -225,14 +225,13 @@ void loadQueuedModels() { } catch (Exception ex) { logger.warn(() -> "[" + modelId + "] Start deployment failed", ex); if (ExceptionsHelper.unwrapCause(ex) instanceof ResourceNotFoundException) { - logger.warn(() -> "[" + modelId + "] Start deployment failed", ex); + logger.debug(() -> "[" + modelId + "] Start deployment failed as model was not found", ex); handleLoadFailure(loadingTask, ExceptionsHelper.missingTrainedModel(modelId, ex)); } else if (ExceptionsHelper.unwrapCause(ex) instanceof SearchPhaseExecutionException) { - logger.trace(() -> "[" + modelId + "] Start deployment failed, will retry", ex); + logger.debug(() -> "[" + modelId + "] Start deployment failed, will retry", ex); // A search phase execution failure should be retried, push task back to the queue loadingToRetry.add(loadingTask); } else { - logger.warn(() -> "[" + modelId + "] Start deployment failed", ex); handleLoadFailure(loadingTask, ex); } } @@ -413,7 +412,7 @@ private void updateNumberOfAllocations(TrainedModelAssignmentMetadata assignment for (TrainedModelAssignment assignment : modelsToUpdate) { TrainedModelDeploymentTask task = modelIdToTask.get(assignment.getModelId()); if (task == null) { - logger.debug(() -> format("[%s] task was removed whilst updating number of allocations", task.getModelId())); + logger.debug(() -> format("[%s] task was removed whilst updating number of allocations", assignment.getModelId())); continue; } RoutingInfo routingInfo = assignment.getNodeRoutingTable().get(nodeId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 6b984628f3b7b..4e6fe4fc0ca2e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -61,6 +61,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -149,34 +150,46 @@ private void 
doStartDeployment(TrainedModelDeploymentTask task, ActionListener { - if (searchVocabResponse.getHits().getHits().length == 0) { - listener.onFailure( - new ResourceNotFoundException( - Messages.getMessage( - Messages.VOCABULARY_NOT_FOUND, - task.getModelId(), - VocabularyConfig.docId(modelConfig.getModelId()) + if (modelConfig.getInferenceConfig()instanceof NlpConfig nlpConfig) { + task.init(nlpConfig); + + SearchRequest searchRequest = vocabSearchRequest(nlpConfig.getVocabularyConfig(), modelConfig.getModelId()); + executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchVocabResponse -> { + if (searchVocabResponse.getHits().getHits().length == 0) { + listener.onFailure( + new ResourceNotFoundException( + Messages.getMessage( + Messages.VOCABULARY_NOT_FOUND, + task.getModelId(), + VocabularyConfig.docId(modelConfig.getModelId()) + ) ) - ) + ); + return; + } + + Vocabulary vocabulary = parseVocabularyDocLeniently(searchVocabResponse.getHits().getAt(0)); + NlpTask nlpTask = new NlpTask(nlpConfig, vocabulary); + NlpTask.Processor processor = nlpTask.createProcessor(); + processContext.nlpTaskProcessor.set(processor); + // here, we are being called back on the searching thread, which MAY be a network thread + // `startAndLoad` creates named pipes, blocking the calling thread, better to execute that in our utility + // executor. + executorServiceForDeployment.execute( + () -> startAndLoad(processContext, modelConfig.getLocation(), modelLoadedListener) ); - return; - } - - Vocabulary vocabulary = parseVocabularyDocLeniently(searchVocabResponse.getHits().getAt(0)); - NlpTask nlpTask = new NlpTask(nlpConfig, vocabulary); - NlpTask.Processor processor = nlpTask.createProcessor(); - processContext.nlpTaskProcessor.set(processor); - // here, we are being called back on the searching thread, which MAY be a network thread - // `startAndLoad` creates named pipes, blocking the calling thread, better to execute that in our utility - // executor. 
- executorServiceForDeployment.execute(() -> startAndLoad(processContext, modelConfig.getLocation(), modelLoadedListener)); - }, listener::onFailure)); + }, listener::onFailure)); + } else { + listener.onFailure( + new IllegalArgumentException( + format( + "[%s] must be a pytorch model; found inference config of kind [%s]", + modelConfig.getModelId(), + modelConfig.getInferenceConfig().getWriteableName() + ) + ) + ); + } }, listener::onFailure); executeAsyncWithOrigin( @@ -404,10 +417,12 @@ private Consumer onProcessCrash() { } void loadModel(TrainedModelLocation modelLocation, ActionListener listener) { - if (modelLocation instanceof IndexLocation) { - process.get().loadModel(task.getModelId(), ((IndexLocation) modelLocation).getIndexName(), stateStreamer, listener); + if (modelLocation instanceof IndexLocation indexLocation) { + process.get().loadModel(task.getModelId(), indexLocation.getIndexName(), stateStreamer, listener); } else { - throw new IllegalStateException("unsupported trained model location [" + modelLocation.getClass().getSimpleName() + "]"); + listener.onFailure( + new IllegalStateException("unsupported trained model location [" + modelLocation.getClass().getSimpleName() + "]") + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java index 40d0162e15911..2c440941b5224 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java @@ -10,11 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.CheckedFunction; @@ -38,8 +38,10 @@ import java.util.concurrent.ExecutorService; import java.util.function.Consumer; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_INFERENCE_COMMS_THREAD_POOL_NAME; +import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME; /** * Searches for and emits {@link TrainedModelDefinitionDoc}s in @@ -71,7 +73,7 @@ public ChunkedTrainedModelRestorer( ExecutorService executorService, NamedXContentRegistry xContentRegistry ) { - this.client = client; + this.client = new OriginSettingClient(client, ML_ORIGIN); this.executorService = executorService; this.xContentRegistry = xContentRegistry; this.modelId = modelId; @@ -122,7 +124,6 @@ public void restoreModelDefinition( logger.debug("[{}] restoring model", modelId); SearchRequest searchRequest = buildSearch(client, modelId, index, searchSize, null); - executorService.execute(() -> 
doSearch(searchRequest, modelConsumer, successConsumer, errorConsumer)); } @@ -132,8 +133,16 @@ private void doSearch( Consumer successConsumer, Consumer errorConsumer ) { - - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> { + try { + assert Thread.currentThread().getName().contains(NATIVE_INFERENCE_COMMS_THREAD_POOL_NAME) + || Thread.currentThread().getName().contains(UTILITY_THREAD_POOL_NAME) + : format( + "Must execute from [%s] or [%s] but thread is [%s]", + NATIVE_INFERENCE_COMMS_THREAD_POOL_NAME, + UTILITY_THREAD_POOL_NAME, + Thread.currentThread().getName() + ); + SearchResponse searchResponse = client.search(searchRequest).actionGet(); if (searchResponse.getHits().getHits().length == 0) { errorConsumer.accept(new ResourceNotFoundException(Messages.getMessage(Messages.MODEL_DEFINITION_NOT_FOUND, modelId))); return; @@ -182,13 +191,13 @@ private void doSearch( searchRequestBuilder.searchAfter(new Object[] { lastHit.getIndex(), lastNum }); executorService.execute(() -> doSearch(searchRequestBuilder.request(), modelConsumer, successConsumer, errorConsumer)); } - }, e -> { + } catch (Exception e) { if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { errorConsumer.accept(new ResourceNotFoundException(Messages.getMessage(Messages.MODEL_DEFINITION_NOT_FOUND, modelId))); } else { errorConsumer.accept(e); } - })); + } } private static SearchRequestBuilder buildSearchBuilder(Client client, String modelId, String index, int searchSize) { From 579692d5a35f94bce44de2bb86ae55781651d4d9 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 1 Aug 2022 09:29:21 -0500 Subject: [PATCH 032/265] Fix race conditions in master stability polling (#88874) This fixes some possible race conditions in the cluster formation polling of the stable master code. It also prevents the list of tasks from growing indefinitely. --- .../CoordinationDiagnosticsServiceIT.java | 95 ++++++++++++++ .../CoordinationDiagnosticsService.java | 100 ++++++++++----- .../CoordinationDiagnosticsServiceTests.java | 119 ++++++------------ 3 files changed, 204 insertions(+), 110 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java new file mode 100644 index 0000000000000..9f4d1fad8eef3 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.coordination; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.BlockClusterStateProcessing; +import org.elasticsearch.threadpool.Scheduler; +import org.junit.Before; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.emptyOrNullString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class CoordinationDiagnosticsServiceIT extends ESIntegTestCase { + @Before + private void setBootstrapMasterNodeIndex() { + internalCluster().setBootstrapMasterNodeIndex(0); + } + + public void testBlockClusterStateProcessingOnOneNode() throws Exception { + /* + * This test picks a node that is not elected master, and then blocks cluster state processing on it. The reason is so that we + * can call CoordinationDiagnosticsService#beginPollingClusterFormationInfo without a cluster changed event resulting in the + * values we pass in being overwritten. + */ + final List nodeNames = internalCluster().startNodes(3); + + final String master = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(master)); + String blockedNode = nodeNames.stream().filter(n -> n.equals(master) == false).findAny().get(); + assertNotNull(blockedNode); + + DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, master).state().nodes(); + Set nodesWithoutBlockedNode = discoveryNodes.getNodes() + .values() + .stream() + .filter(n -> n.getName().equals(blockedNode) == false) + .collect(Collectors.toSet()); + + BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(blockedNode, random()); + internalCluster().setDisruptionScheme(disruption); + // stop processing cluster state changes + disruption.startDisrupting(); + + CoordinationDiagnosticsService diagnosticsOnBlockedNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + blockedNode + ); + ConcurrentMap nodeToClusterFormationStateMap = + new ConcurrentHashMap<>(); + ConcurrentHashMap cancellables = new ConcurrentHashMap<>(); + diagnosticsOnBlockedNode.clusterFormationResponses = nodeToClusterFormationStateMap; + diagnosticsOnBlockedNode.clusterFormationInfoTasks = cancellables; + + diagnosticsOnBlockedNode.beginPollingClusterFormationInfo( + nodesWithoutBlockedNode, + nodeToClusterFormationStateMap::put, + cancellables + ); + + // while the node is blocked from processing cluster state changes it should reach out to the other 2 + // master eligible nodes and get a successful response + assertBusy(() -> { + assertThat(cancellables.size(), is(2)); + assertThat(nodeToClusterFormationStateMap.size(), is(2)); + nodesWithoutBlockedNode.forEach(node -> { + CoordinationDiagnosticsService.ClusterFormationStateOrException result = nodeToClusterFormationStateMap.get(node); + assertNotNull(result); + assertNotNull(result.clusterFormationState()); + assertNull(result.exception()); + ClusterFormationFailureHelper.ClusterFormationState clusterFormationState = result.clusterFormationState(); + 
assertThat(clusterFormationState.getDescription(), not(emptyOrNullString())); + }); + }); + + disruption.stopDisrupting(); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index 2e0303dbd5b4c..9e0b266697e69 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -48,7 +48,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -86,12 +85,13 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { private final int unacceptableIdentityChanges; /* - * This is a list of tasks that are periodically reaching out to other master eligible nodes to get their ClusterFormationStates for - * diagnosis. + * This is a Map of tasks that are periodically reaching out to other master eligible nodes to get their ClusterFormationStates for + * diagnosis. The key is the DisoveryNode for the master eligible node being polled, and the value is a Cancellable. * The field is accessed (reads/writes) from multiple threads, but the reference itself is only ever changed on the cluster change * event thread. */ - private volatile List clusterFormationInfoTasks = null; + // Non-private for testing + volatile Map clusterFormationInfoTasks = null; /* * This field holds the results of the tasks in the clusterFormationInfoTasks field above. The field is accessed (reads/writes) from * multiple threads, but the reference itself is only ever changed on the cluster change event thread. @@ -612,9 +612,9 @@ public void clusterChanged(ClusterChangedEvent event) { } if (currentMaster == null && clusterService.localNode().isMasterNode()) { /* - * This begins polling all master-eligible nodes for cluster formation information. However there's a 10-second delay before it - * starts, so in the normal situation where during a master transition it flips from master1 -> null -> master2, it the - * polling tasks will be canceled before any requests are actually made. + * This begins polling all master-eligible nodes for cluster formation information. However there's a 10-second delay + * before it starts, so in the normal situation where during a master transition it flips from master1 -> null -> + * master2 the polling tasks will be canceled before any requests are actually made. */ beginPollingClusterFormationInfo(); } else { @@ -626,14 +626,18 @@ public void clusterChanged(ClusterChangedEvent event) { * This method begins polling all known master-eligible nodes for cluster formation information. After a 10-second initial delay, it * polls each node every 10 seconds until cancelPollingClusterFormationInfo() is called. 
*/ - private void beginPollingClusterFormationInfo() { + void beginPollingClusterFormationInfo() { assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); cancelPollingClusterFormationInfo(); ConcurrentMap responses = new ConcurrentHashMap<>(); - List cancellables = new CopyOnWriteArrayList<>(); - beginPollingClusterFormationInfo(getMasterEligibleNodes(), responses::put, cancellables::add); - clusterFormationResponses = responses; + Map cancellables = new ConcurrentHashMap<>(); + /* + * Assignment of clusterFormationInfoTasks must be done before the call to beginPollingClusterFormationInfo because it is used + * asynchronously by rescheduleFetchConsumer, called from beginPollingClusterFormationInfo. + */ clusterFormationInfoTasks = cancellables; + clusterFormationResponses = responses; + beginPollingClusterFormationInfo(getMasterEligibleNodes(), responses::put, cancellables); } /** @@ -641,21 +645,22 @@ private void beginPollingClusterFormationInfo() { * repeats doing that until cancel() is called on all of the Cancellable that this method inserts into cancellables. This method * exists (rather than being just part of the beginPollingClusterFormationInfo() above) in order to facilitate unit testing. * @param nodeResponseConsumer A consumer for any results produced for a node by this method - * @param cancellableConsumer A consumer for any Cancellable tasks produced by this method + * @param cancellables The Map of Cancellables, one for each node being polled */ // Non-private for testing void beginPollingClusterFormationInfo( Collection masterEligibleNodes, BiConsumer nodeResponseConsumer, - Consumer cancellableConsumer + Map cancellables ) { masterEligibleNodes.forEach(masterEligibleNode -> { Consumer responseConsumer = result -> nodeResponseConsumer.accept(masterEligibleNode, result); try { - cancellableConsumer.accept( + cancellables.put( + masterEligibleNode, fetchClusterFormationInfo( masterEligibleNode, - responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellableConsumer)) + responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellables)) ) ); } catch (EsRejectedExecutionException e) { @@ -673,38 +678,69 @@ void beginPollingClusterFormationInfo( * completed, adding the resulting Cancellable to cancellableConsumer. * @param masterEligibleNode The node being polled * @param responseConsumer The response consumer to be wrapped - * @param cancellableConsumer The list of Cancellables + * @param cancellables The Map of Cancellables, one for each node being polled * @return */ private Consumer rescheduleFetchConsumer( DiscoveryNode masterEligibleNode, Consumer responseConsumer, - Consumer cancellableConsumer + Map cancellables ) { return response -> { - try { - cancellableConsumer.accept( - fetchClusterFormationInfo( - masterEligibleNode, - responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellableConsumer)) - ) - ); - } catch (EsRejectedExecutionException e) { - if (e.isExecutorShutdown()) { - logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e); + /* + * If clusterFormationInfoTasks is null, that means that cancelPollingClusterFormationInfo() has been called, so we don't + * want to run anything new, and we want to cancel anything that might still be running in our cancellables just to be safe. 
+ */ + if (clusterFormationInfoTasks != null) { + /* + * If cancellables is not the same as clusterFormationInfoTasks, that means that the current polling track has been + * cancelled and a new polling track has been started. So we don't want to run anything new, and we want to cancel + * anything that might still be running in our cancellables just to be safe. Note that it is possible for + * clusterFormationInfoTasks to be null at this point (since it is assigned in a different thread), so it is important + * that we don't call equals on it. + */ + if (cancellables.equals(clusterFormationInfoTasks)) { + /* + * As mentioned in the comment in cancelPollingClusterFormationInfo(), there is a slim possibility here that we will + * add a task here for a poll that has already been cancelled. But when it completes and runs rescheduleFetchConsumer() + * we will then see that clusterFormationInfoTasks does not equal cancellables, so it will not be run again. + */ + try { + cancellables.put( + masterEligibleNode, + fetchClusterFormationInfo( + masterEligibleNode, + responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellables)) + ) + ); + } catch (EsRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e); + } else { + throw e; + } + } } else { - throw e; + cancellables.values().forEach(Scheduler.Cancellable::cancel); } + } else { + cancellables.values().forEach(Scheduler.Cancellable::cancel); } }; } - private void cancelPollingClusterFormationInfo() { + void cancelPollingClusterFormationInfo() { assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); - if (clusterFormationResponses != null) { - clusterFormationInfoTasks.forEach(Scheduler.Cancellable::cancel); - clusterFormationResponses = null; + if (clusterFormationInfoTasks != null) { + /* + * There is a slight risk here that a new Cancellable is added to clusterFormationInfoTasks after we begin iterating in the next + * line. We are calling this an acceptable risk because it will result in an un-cancelled un-cancellable task, but it will not + * reschedule itself so it will not be around long. It is possible that cancel() will be called on a Cancellable concurrently + * by multiple threads, but that will not cause any problems. 
+ */ + clusterFormationInfoTasks.values().forEach(Scheduler.Cancellable::cancel); clusterFormationInfoTasks = null; + clusterFormationResponses = null; } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java index bf465fec9a368..fa05f6e629fff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.monitor.StatusInfo; @@ -27,6 +28,7 @@ import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; @@ -40,7 +42,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static org.elasticsearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.EXTREME_DELAY_VARIABILITY; @@ -906,82 +907,44 @@ private ClusterFormationFailureHelper.ClusterFormationState getClusterFormationS ); } - public void testBeginPollingClusterFormationInfo() { - /* - * This test sets up a 4-node cluster (3 master eligible). We call beginPollingClusterFormationInfo() on each node. This is allowed - * to run for a bit, and then we assert that we have cluster formation information from each master eligible node. Then we - * disconnect a random master eligible node, allow the polling to continue to run (we never cancelled it), and assert that we - * have the expected exceptions in the polling results. 
- */ - try (Cluster cluster = new Cluster(3, true, Settings.EMPTY)) { - createAndAddNonMasterNode(cluster); - cluster.runRandomly(); - cluster.stabilise(); - List masterNodes = cluster.clusterNodes.stream() - .map(Cluster.ClusterNode::getLocalNode) - .filter(DiscoveryNode::isMasterNode) - .toList(); - cluster.clusterNodes.stream().filter(node -> node.getLocalNode().isMasterNode()).forEach(node -> { - ConcurrentMap nodeToClusterFormationStateMap = new ConcurrentHashMap<>(); - node.coordinationDiagnosticsService.beginPollingClusterFormationInfo( - masterNodes, - nodeToClusterFormationStateMap::put, - cancellable -> {} - ); + public void testBeginPollingClusterFormationInfo() throws Exception { + MasterHistoryService masterHistoryService = createMasterHistoryService(); + var clusterService = mock(ClusterService.class); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + when(clusterService.state()).thenReturn(nullMasterClusterState); + DiscoveryNode localNode = node3; + when(clusterService.localNode()).thenReturn(localNode); + Coordinator coordinator = mock(Coordinator.class); + when(coordinator.getFoundPeers()).thenReturn(List.of(node1, node2, localNode)); + DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(); + ThreadPool threadPool = deterministicTaskQueue.getThreadPool(); - cluster.runRandomly(false, true, EXTREME_DELAY_VARIABILITY); - cluster.stabilise(); + TransportService transportService = mock(TransportService.class); + when(transportService.getThreadPool()).thenReturn(threadPool); + CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( + clusterService, + transportService, + coordinator, + masterHistoryService + ); - /* - * The cluster has now run normally for some period of time, so check that the outputs of - * beginPollingClusterFormationInfo() are present with no exceptions: - */ - assertThat(nodeToClusterFormationStateMap.size(), equalTo(masterNodes.size())); - masterNodes.stream().filter(masterNode -> node.getLocalNode().equals(masterNode) == false).forEach(masterNode -> { - ClusterFormationStateOrException clusterFormationStateOrException = nodeToClusterFormationStateMap.get(masterNode); - assertNotNull(clusterFormationStateOrException); - assertNotNull(clusterFormationStateOrException.clusterFormationState()); - assertNull(clusterFormationStateOrException.exception()); - ClusterFormationFailureHelper.ClusterFormationState clusterFormationState = clusterFormationStateOrException - .clusterFormationState(); - assertThat(clusterFormationState.getDescription(), not(emptyOrNullString())); - }); - - /* - * Now we disconnect a random node, simulate running the cluster for a little while, and make sure that the results of - * beginPollingClusterFormationInfo() contain the expected exceptions. 
- */ - Cluster.ClusterNode nodeToDisconnect = cluster.clusterNodes.stream() - .filter(clusterNode -> clusterNode.getLocalNode().isMasterNode()) - .findAny() - .get(); - nodeToDisconnect.disconnect(); - cluster.stabilise(); - assertThat(nodeToClusterFormationStateMap.size(), equalTo(masterNodes.size())); - AtomicInteger exceptions = new AtomicInteger(); - masterNodes.stream().filter(masterNode -> node.getLocalNode().equals(masterNode) == false).forEach(masterNode -> { - ClusterFormationStateOrException clusterFormationStateOrException = nodeToClusterFormationStateMap.get(masterNode); - assertNotNull(clusterFormationStateOrException); - if (clusterFormationStateOrException.clusterFormationState() != null) { - assertNull(clusterFormationStateOrException.exception()); - ClusterFormationFailureHelper.ClusterFormationState clusterFormationState = clusterFormationStateOrException - .clusterFormationState(); - assertThat(clusterFormationState.getDescription(), not(emptyOrNullString())); - } else { - assertNotNull(clusterFormationStateOrException.exception()); - exceptions.getAndIncrement(); - } - }); - if (node.equals(nodeToDisconnect)) { - // If this was the disconnected node, it will have encountered exceptions contacting all nodes except itself: - assertThat(exceptions.get(), equalTo(masterNodes.size() - 1)); - } else { - // Other nodes will only have encountered an exception contacting the disconnected node: - assertThat(exceptions.get(), equalTo(1)); - } - nodeToDisconnect.heal(); - }); - } + coordinationDiagnosticsService.beginPollingClusterFormationInfo(); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks.size(), equalTo(3)); + coordinationDiagnosticsService.cancelPollingClusterFormationInfo(); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks, Matchers.nullValue()); + coordinationDiagnosticsService.clusterChanged( + new ClusterChangedEvent(TEST_SOURCE, nullMasterClusterState, node1MasterClusterState) + ); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks.size(), equalTo(3)); + coordinationDiagnosticsService.clusterChanged( + new ClusterChangedEvent(TEST_SOURCE, node1MasterClusterState, nullMasterClusterState) + ); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks, Matchers.nullValue()); + /* + * Note that in this test we will never find any values in clusterFormationResponses because transportService is mocked out. + * There is not a reasonable way to plug in a transportService to this simple unit test, so testing that is left to an + * integration test. 
+ */ } public void testBeginPollingClusterFormationInfoCancel() { @@ -1002,13 +965,13 @@ public void testBeginPollingClusterFormationInfoCancel() { .toList(); cluster.clusterNodes.stream().filter(node -> node.getLocalNode().isMasterNode()).forEach(node -> { ConcurrentMap nodeToClusterFormationStateMap = new ConcurrentHashMap<>(); - List cancellables = new ArrayList<>(); + Map cancellables = new ConcurrentHashMap<>(); node.coordinationDiagnosticsService.beginPollingClusterFormationInfo( masterNodes, nodeToClusterFormationStateMap::put, - cancellables::add + cancellables ); - cancellables.forEach(Scheduler.Cancellable::cancel); // This is what will most often happen in practice + cancellables.values().forEach(Scheduler.Cancellable::cancel); // This is what will most often happen in practice cluster.runRandomly(false, true, EXTREME_DELAY_VARIABILITY); cluster.stabilise(); assertThat(nodeToClusterFormationStateMap.size(), equalTo(0)); // Everything was cancelled From 3420be0ca5a2fdeb7e91314f01a62bcd72d5840f Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 1 Aug 2022 09:17:50 -0600 Subject: [PATCH 033/265] Fix renaming data streams with CCR replication (#88875) This commit fixes the situation where a user wants to use CCR to replicate indices that are part of a data stream while renaming the data stream. For example, assume a user has an auto-follow request that looks like this: ``` PUT /_ccr/auto_follow/my-auto-follow-pattern { "remote_cluster" : "other-cluster", "leader_index_patterns" : ["logs-*"], "follow_index_pattern" : "{{leader_index}}_copy" } ``` And then the data stream `logs-mysql-error` was created, creating the backing index `.ds-logs-mysql-error-2022-07-29-000001`. Prior to this commit, replicating this data stream means that the backing index would be renamed to `.ds-logs-mysql-error-2022-07-29-000001_copy` and the data stream would *not* be renamed. This caused a check to trip in `TransportPutLifecycleAction` asserting that a backing index was not renamed for a data stream during following. After this commit, there are a couple of changes: First, the data stream will also be renamed. This means that the `logs-mysql-error` becomes `logs-mysql-error_copy` when created on the follower cluster. Because of the way that CCR works, this means we need to support renaming a data stream for a regular "create follower" request, so a new parameter has been added: `data_stream_name`. It works like this: ``` PUT /mynewindex/_ccr/follow { "remote_cluster": "other-cluster", "leader_index": "myotherindex", "data_stream_name": "new_ds" } ``` Second, the backing index for a data stream must be renamed in a way that does not break the parsing of a data stream backing pattern, whereas previously the index `.ds-logs-mysql-error-2022-07-29-000001` would be renamed to `.ds-logs-mysql-error-2022-07-29-000001_copy` (an illegal name since it doesn't end with the rollover digit), after this commit it will be renamed to `.ds-logs-mysql-error_copy-2022-07-29-000001` to match the renamed data stream. 
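As a minimal sketch of the renaming rule (the class and helper names below are made up for illustration; this is not the actual follower-creation code), the follow pattern is applied to the data stream name first, and the backing index keeps its `.ds-` prefix and its date/generation suffix so it still parses as a backing index:

```java
// Illustration of the rename described above, using the example names from this change.
public final class FollowerNameExample {

    // Applies a follow pattern such as "{{leader_index}}_copy" to a leader name.
    static String applyPattern(String pattern, String leaderName) {
        return pattern.replace("{{leader_index}}", leaderName);
    }

    // Swaps the data stream name embedded in a backing index, keeping ".ds-" and the
    // trailing "<date>-<generation>" suffix untouched.
    static String renameBackingIndex(String leaderBackingIndex, String leaderDataStream, String followerDataStream) {
        String prefix = ".ds-" + leaderDataStream + "-";
        if (leaderBackingIndex.startsWith(prefix) == false) {
            throw new IllegalArgumentException(leaderBackingIndex + " is not a backing index of " + leaderDataStream);
        }
        return ".ds-" + followerDataStream + "-" + leaderBackingIndex.substring(prefix.length());
    }

    public static void main(String[] args) {
        String followerDataStream = applyPattern("{{leader_index}}_copy", "logs-mysql-error");
        String followerBackingIndex = renameBackingIndex(
            ".ds-logs-mysql-error-2022-07-29-000001",
            "logs-mysql-error",
            followerDataStream
        );
        // logs-mysql-error_copy / .ds-logs-mysql-error_copy-2022-07-29-000001
        System.out.println(followerDataStream + " / " + followerBackingIndex);
    }
}
```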
This means that for the given `follow_index_pattern` of `{{leader_index}}_copy` the index changes look like: | Leader Cluster | Follower Cluster | |--------------|-----------| | `logs-mysql-error` (data stream) | `logs-mysql-error_copy` (data stream) | | `.ds-logs-mysql-error-2022-07-29-000001` | `.ds-logs-mysql-error_copy-2022-07-29-000001` | Which internally means the auto-follow request turned into the create follower request of: ``` PUT /.ds-logs-mysql-error_copy-2022-07-29-000001/_ccr/follow { "remote_cluster": "other-cluster", "leader_index": ".ds-logs-mysql-error-2022-07-29-000001", "data_stream_name": "logs-mysql-error_copy" } ``` Relates to https://github.com/elastic/elasticsearch/pull/84940 (cherry-picked the commit for a test) Relates to https://github.com/elastic/elasticsearch/pull/61993 (where data stream support was first introduced for CCR) Resolves https://github.com/elastic/elasticsearch/issues/81751 --- docs/changelog/88875.yaml | 6 + .../put-auto-follow-pattern.asciidoc | 13 +- .../ccr/apis/follow/put-follow.asciidoc | 20 ++ .../elasticsearch/xpack/ccr/AutoFollowIT.java | 129 ++++++- .../xpack/ccr/FollowIndexIT.java | 20 -- .../xpack/ccr/FollowIndexSecurityIT.java | 2 +- .../xpack/ccr/ESCCRRestTestCase.java | 11 +- .../ccr/action/AutoFollowCoordinator.java | 128 ++++++- .../ccr/action/TransportPutFollowAction.java | 57 ++- .../action/AutoFollowCoordinatorTests.java | 328 ++++++++++++++++++ .../ccr/action/FollowParametersTests.java | 5 + .../action/PutFollowActionRequestTests.java | 37 ++ .../action/TransportPutFollowActionTests.java | 28 +- .../core/ccr/action/PutFollowAction.java | 37 +- 14 files changed, 753 insertions(+), 68 deletions(-) create mode 100644 docs/changelog/88875.yaml diff --git a/docs/changelog/88875.yaml b/docs/changelog/88875.yaml new file mode 100644 index 0000000000000..0643e86a6dfe7 --- /dev/null +++ b/docs/changelog/88875.yaml @@ -0,0 +1,6 @@ +pr: 88875 +summary: Fix renaming data streams with CCR replication +area: "Data streams" +type: bug +issues: + - 81751 diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index ed377e72fce49..3876cab007d90 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -85,11 +85,14 @@ the new patterns. more `leader_index_patterns` and one or more `leader_index_exclusion_patterns` won't be followed. `follow_index_pattern`:: - (Optional, string) The name of follower index. The template `{{leader_index}}` - can be used to derive the name of the follower index from the name of the - leader index. When following a data stream, use `{{leader_index}}`; {ccr-init} - does not support changes to the names of a follower data stream's backing - indices. + (Optional, string) The name of follower index. The template `{{leader_index}}` can be used to + derive the name of the follower index from the name of the leader index. When following a data + stream, the `follow_index_pattern` will be used for renaming not only the leader index, but also + the data stream containing the leader index. For example, a data stream called + `logs-mysql-default` with a backing index of `.ds-logs-mysql-default-2022-01-01-000001` and a + `follow_index_pattern` of `{{leader_index}}_copy` will replicate the data stream as + `logs-mysql-default_copy` and the backing index as + `.ds-logs-mysql-default_copy-2022-01-01-000001`. 
include::../follow-request-body.asciidoc[] diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index d09eb51534042..93e8a710751a8 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -76,6 +76,26 @@ referenced leader index. When this API returns, the follower index exists, and (Required, string) The <> containing the leader index. +[[ccr-put-follow-request-body-data_stream_name]]`data_stream_name`:: + (Optional, string) If the leader index is part of a <>, the name to + which the local data stream for the followed index should be renamed. For example, A request like: + +[source,console] +-------------------------------------------------- +PUT /.ds-logs-mysql-default_copy-2022-01-01-000001/_ccr/follow +{ + "remote_cluster" : "remote_cluster", + "leader_index" : ".ds-logs-mysql-default-2022-01-01-000001", + "data_stream_name": "logs-mysql-default_copy" +} +-------------------------------------------------- +// TEST[skip:no setup] + +Replicates the leader index `.ds-logs-mysql-default-2022-01-01-000001` into the follower index +`.ds-logs-mysql-default_copy-2022-01-01-000001` and will do so using the data stream +`logs-mysql-default_copy`, as opposed to the original leader data stream name of +`logs-mysql-default`. + include::../follow-request-body.asciidoc[] [[ccr-put-follow-examples]] diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 35fac474d86f3..ffdd40a1bd844 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -236,7 +236,7 @@ public void testDataStreams() throws Exception { int initialNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); try { // Create auto follow pattern - createAutoFollowPattern(client(), autoFollowPatternName, "logs-mysql-*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPatternName, "logs-mysql-*", "leader_cluster", null); // Create data stream and ensure that is is auto followed try (RestClient leaderClient = buildLeaderClient()) { @@ -320,6 +320,121 @@ public void testDataStreams() throws Exception { } } + public void testDataStreamsRenameFollowDataStream() throws Exception { + if ("follow".equals(targetCluster) == false) { + return; + } + + final int numDocs = 64; + final String dataStreamName = "logs-mysql-error"; + final String dataStreamNameFollower = "logs-mysql-error_copy"; + final String autoFollowPatternName = getTestName().toLowerCase(Locale.ROOT); + + int initialNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); + try { + // Create auto follow pattern + createAutoFollowPattern(client(), autoFollowPatternName, "logs-mysql-*", "leader_cluster", "{{leader_index}}_copy"); + + // Create data stream and ensure that is is auto followed + try (RestClient leaderClient = buildLeaderClient()) { + for (int i = 0; i < numDocs; i++) { + Request indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity("{\"@timestamp\": \"" + DATE_FORMAT.format(new Date()) + "\",\"message\":\"abc\"}"); + assertOK(leaderClient.performRequest(indexRequest)); + } + 
verifyDataStream(leaderClient, dataStreamName, backingIndexName(dataStreamName, 1)); + verifyDocuments(leaderClient, dataStreamName, numDocs); + } + logger.info( + "--> checking {} with index {} has been auto followed to {} with backing index {}", + dataStreamName, + backingIndexName(dataStreamName, 1), + dataStreamNameFollower, + backingIndexName(dataStreamNameFollower, 1) + ); + assertBusy(() -> { + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(initialNumberOfSuccessfulFollowedIndices + 1)); + verifyDataStream(client(), dataStreamNameFollower, backingIndexName(dataStreamNameFollower, 1)); + ensureYellow(dataStreamNameFollower); + verifyDocuments(client(), dataStreamNameFollower, numDocs); + }); + + // First rollover and ensure second backing index is replicated: + logger.info("--> rolling over"); + try (RestClient leaderClient = buildLeaderClient()) { + Request rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); + assertOK(leaderClient.performRequest(rolloverRequest)); + verifyDataStream(leaderClient, dataStreamName, backingIndexName(dataStreamName, 1), backingIndexName(dataStreamName, 2)); + + Request indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity("{\"@timestamp\": \"" + DATE_FORMAT.format(new Date()) + "\",\"message\":\"abc\"}"); + assertOK(leaderClient.performRequest(indexRequest)); + verifyDocuments(leaderClient, dataStreamName, numDocs + 1); + } + assertBusy(() -> { + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(initialNumberOfSuccessfulFollowedIndices + 2)); + verifyDataStream( + client(), + dataStreamNameFollower, + backingIndexName(dataStreamNameFollower, 1), + backingIndexName(dataStreamNameFollower, 2) + ); + ensureYellow(dataStreamNameFollower); + verifyDocuments(client(), dataStreamNameFollower, numDocs + 1); + }); + + // Second rollover and ensure third backing index is replicated: + logger.info("--> rolling over"); + try (RestClient leaderClient = buildLeaderClient()) { + Request rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); + assertOK(leaderClient.performRequest(rolloverRequest)); + verifyDataStream( + leaderClient, + dataStreamName, + backingIndexName(dataStreamName, 1), + backingIndexName(dataStreamName, 2), + backingIndexName(dataStreamName, 3) + ); + + Request indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity("{\"@timestamp\": \"" + DATE_FORMAT.format(new Date()) + "\",\"message\":\"abc\"}"); + assertOK(leaderClient.performRequest(indexRequest)); + verifyDocuments(leaderClient, dataStreamName, numDocs + 2); + } + assertBusy(() -> { + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(initialNumberOfSuccessfulFollowedIndices + 3)); + verifyDataStream( + client(), + dataStreamNameFollower, + backingIndexName(dataStreamNameFollower, 1), + backingIndexName(dataStreamNameFollower, 2), + backingIndexName(dataStreamNameFollower, 3) + ); + ensureYellow(dataStreamNameFollower); + verifyDocuments(client(), dataStreamNameFollower, numDocs + 2); + }); + + } finally { + cleanUpFollower( + List.of( + backingIndexName(dataStreamNameFollower, 1), + backingIndexName(dataStreamNameFollower, 2), + backingIndexName(dataStreamNameFollower, 3) + ), + List.of(dataStreamNameFollower), + List.of(autoFollowPatternName) + ); + cleanUpLeader( + List.of(backingIndexName(dataStreamName, 1), 
backingIndexName(dataStreamName, 2), backingIndexName(dataStreamName, 3)), + List.of(dataStreamName), + List.of() + ); + } + } + public void testDataStreams_autoFollowAfterDataStreamCreated() throws Exception { if ("follow".equals(targetCluster) == false) { return; @@ -353,7 +468,7 @@ public void testDataStreams_autoFollowAfterDataStreamCreated() throws Exception } // Create auto follow pattern - createAutoFollowPattern(client(), autoFollowPatternName, dataStreamName + "*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPatternName, dataStreamName + "*", "leader_cluster", null); // Rollover and ensure only second backing index is replicated: try (RestClient leaderClient = buildLeaderClient()) { @@ -410,7 +525,7 @@ public void testRolloverDataStreamInFollowClusterForbidden() throws Exception { List backingIndexNames = null; try { // Create auto follow pattern - createAutoFollowPattern(client(), autoFollowPatternName, "logs-tomcat-*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPatternName, "logs-tomcat-*", "leader_cluster", null); // Create data stream and ensure that is is auto followed try (var leaderClient = buildLeaderClient()) { @@ -531,7 +646,7 @@ public void testRolloverAliasInFollowClusterForbidden() throws Exception { int initialNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); try { // Create auto follow pattern - createAutoFollowPattern(client(), "test_pattern", "log-*", "leader_cluster"); + createAutoFollowPattern(client(), "test_pattern", "log-*", "leader_cluster", null); // Create leader index and write alias: try (var leaderClient = buildLeaderClient()) { @@ -618,7 +733,7 @@ public void testDataStreamsBiDirectionalReplication() throws Exception { try { // Create auto follow pattern in follow cluster - createAutoFollowPattern(client(), "id1", "logs-*-eu", "leader_cluster"); + createAutoFollowPattern(client(), "id1", "logs-*-eu", "leader_cluster", null); // Create auto follow pattern in leader cluster: try (var leaderClient = buildLeaderClient()) { @@ -658,7 +773,7 @@ public void testDataStreamsBiDirectionalReplication() throws Exception { } assertOK(leaderClient.performRequest(request)); // Then create the actual auto follow pattern: - createAutoFollowPattern(leaderClient, "id2", "logs-*-na", "follower_cluster"); + createAutoFollowPattern(leaderClient, "id2", "logs-*-na", "follower_cluster", null); } var numDocs = 128; @@ -832,7 +947,7 @@ public void testAutoFollowSearchableSnapshotsFails() throws Exception { final String mountedIndex = testPrefix + "-mounted"; try { - createAutoFollowPattern(client(), autoFollowPattern, testPrefix + "-*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPattern, testPrefix + "-*", "leader_cluster", null); // Create a regular index on leader try (var leaderClient = buildLeaderClient()) { diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 38132b53ed300..db8562bac62ef 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -180,26 +180,6 @@ public void testFollowDataStreamFails() throws Exception { assertThat(failure.getMessage(), containsString("cannot follow [logs-syslog-prod], because it is a DATA_STREAM")); } - public void 
testChangeBackingIndexNameFails() throws Exception { - if ("follow".equals(targetCluster) == false) { - return; - } - - final String dataStreamName = "logs-foobar-prod"; - try (RestClient leaderClient = buildLeaderClient()) { - Request request = new Request("PUT", "/_data_stream/" + dataStreamName); - assertOK(leaderClient.performRequest(request)); - verifyDataStream(leaderClient, dataStreamName, DataStream.getDefaultBackingIndexName("logs-foobar-prod", 1)); - } - - ResponseException failure = expectThrows( - ResponseException.class, - () -> followIndex(DataStream.getDefaultBackingIndexName("logs-foobar-prod", 1), ".ds-logs-barbaz-prod-000001") - ); - assertThat(failure.getResponse().getStatusLine().getStatusCode(), equalTo(400)); - assertThat(failure.getMessage(), containsString("a backing index name in the local and remote cluster must remain the same")); - } - public void testFollowSearchableSnapshotsFails() throws Exception { final String testPrefix = getTestName().toLowerCase(Locale.ROOT); diff --git a/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index 24eb234716c4e..c2210af7e0a13 100644 --- a/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -281,7 +281,7 @@ public void testUnPromoteAndFollowDataStream() throws Exception { // Setup { - createAutoFollowPattern(adminClient(), "test_pattern", "logs-eu*", "leader_cluster"); + createAutoFollowPattern(adminClient(), "test_pattern", "logs-eu*", "leader_cluster", null); } // Create data stream and ensure that it is auto followed { diff --git a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java index f7df63db15f97..b95d9f60c62d9 100644 --- a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java +++ b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java @@ -335,7 +335,13 @@ protected static List verifyDataStream(final RestClient client, final St return List.copyOf(actualBackingIndices); } - protected static void createAutoFollowPattern(RestClient client, String name, String pattern, String remoteCluster) throws IOException { + protected static void createAutoFollowPattern( + RestClient client, + String name, + String pattern, + String remoteCluster, + String followIndexPattern + ) throws IOException { Request request = new Request("PUT", "/_ccr/auto_follow/" + name); try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder()) { bodyBuilder.startObject(); @@ -345,6 +351,9 @@ protected static void createAutoFollowPattern(RestClient client, String name, St bodyBuilder.value(pattern); } bodyBuilder.endArray(); + if (followIndexPattern != null) { + bodyBuilder.field("follow_index_pattern", followIndexPattern); + } bodyBuilder.field("remote_cluster", remoteCluster); } bodyBuilder.endObject(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index a53ea9dc69039..b11fafd01f6b9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -61,6 +62,8 @@ import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -72,9 +75,24 @@ */ public class AutoFollowCoordinator extends AbstractLifecycleComponent implements ClusterStateListener { + /** + * This is the string that will be replaced by the leader index name for a backing index or data + * stream. It allows auto-following to automatically rename an index or data stream when + * automatically followed. For example, using "{{leader_index}}_copy" for the follow pattern + * means that a data stream called "logs-foo-bar" would be renamed "logs-foo-bar_copy" when + * replicated, and a backing index called ".ds-logs-foo-bar-2022-02-02-000001" would be renamed + * to ".ds-logs-foo-bar_copy-2022-02-02-000001". + * See {@link AutoFollower#getFollowerIndexName} for the entire usage. + */ + public static final String AUTO_FOLLOW_PATTERN_REPLACEMENT = "{{leader_index}}"; + private static final Logger LOGGER = LogManager.getLogger(AutoFollowCoordinator.class); private static final int MAX_AUTO_FOLLOW_ERRORS = 256; + private static final Pattern DS_BACKING_PATTERN = Pattern.compile( + "^(.*?" + DataStream.BACKING_INDEX_PREFIX + ")(.+)-(\\d{4}.\\d{2}.\\d{2})(-[\\d]+)?$" + ); + private final Client client; private final ClusterService clusterService; private final CcrLicenseChecker ccrLicenseChecker; @@ -563,6 +581,12 @@ private void autoFollowIndices( cleanFollowedRemoteIndices(remoteClusterState, patterns); } + /** + * Go through all the leader indices that need to be followed, ensuring that they are + * auto-followed by only a single pattern, have soft-deletes enabled, are not + * searchable snapshots, and are not already followed. If all of those conditions are met, + * then follow the indices. + */ private void checkAutoFollowPattern( String autoFollowPattenName, String remoteClusterString, @@ -582,8 +606,13 @@ private void checkAutoFollowPattern( leaderIndicesToFollow.size() ); + // Loop through all the as-of-yet-unfollowed indices from the leader for (final Index indexToFollow : leaderIndicesToFollow) { + // Look up the abstraction for the given index, e.g., an index ".ds-foo" could look + // up the Data Stream "foo" IndexAbstraction indexAbstraction = remoteMetadata.getIndicesLookup().get(indexToFollow.getName()); + // Ensure that the remote cluster doesn't have other patterns + // that would follow the index, there can be only one. 
List otherMatchingPatterns = patternsForTheSameRemoteCluster.stream() .filter(otherPattern -> otherPattern.v2().match(indexAbstraction)) .map(Tuple::v1) @@ -605,6 +634,7 @@ private void checkAutoFollowPattern( ); } else { final IndexMetadata leaderIndexMetadata = remoteMetadata.getIndexSafe(indexToFollow); + // First ensure that the index on the leader that we want to follow has soft-deletes enabled if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(leaderIndexMetadata.getSettings()) == false) { String message = String.format( Locale.ROOT, @@ -639,10 +669,12 @@ private void checkAutoFollowPattern( error -> groupedListener.onResponse(new Tuple<>(indexToFollow, error)) ); } else { + // Finally, if there are no reasons why we cannot follow the leader index, perform the follow. followLeaderIndex( autoFollowPattenName, remoteClusterString, indexToFollow, + indexAbstraction, autoFollowPattern, headers, error -> groupedListener.onResponse(new Tuple<>(indexToFollow, error)) @@ -669,22 +701,32 @@ private static boolean leaderIndexAlreadyFollowed(AutoFollowPattern autoFollowPa return false; } - private void followLeaderIndex( - String autoFollowPattenName, - String remoteClusterString, + /** + * Given a remote cluster, index that will be followed (and its abstraction), as well as an + * {@link AutoFollowPattern}, generate the internal follow request for following the index. + */ + static PutFollowAction.Request generateRequest( + String remoteCluster, Index indexToFollow, - AutoFollowPattern pattern, - Map headers, - Consumer onResult + IndexAbstraction indexAbstraction, + AutoFollowPattern pattern ) { final String leaderIndexName = indexToFollow.getName(); final String followIndexName = getFollowerIndexName(pattern, leaderIndexName); PutFollowAction.Request request = new PutFollowAction.Request(); - request.setRemoteCluster(remoteClusterString); + request.setRemoteCluster(remoteCluster); request.setLeaderIndex(indexToFollow.getName()); request.setFollowerIndex(followIndexName); request.setSettings(pattern.getSettings()); + // If there was a pattern specified for renaming the backing index, and this index is + // part of a data stream, then send the new data stream name as part of the request. 
+ if (pattern.getFollowIndexPattern() != null && indexAbstraction.getParentDataStream() != null) { + String dataStreamName = indexAbstraction.getParentDataStream().getDataStream().getName(); + // Send the follow index pattern as the data stream pattern, so that data streams can be + // renamed accordingly (not only the backing indices) + request.setDataStreamName(pattern.getFollowIndexPattern().replace(AUTO_FOLLOW_PATTERN_REPLACEMENT, dataStreamName)); + } request.getParameters().setMaxReadRequestOperationCount(pattern.getMaxReadRequestOperationCount()); request.getParameters().setMaxReadRequestSize(pattern.getMaxReadRequestSize()); request.getParameters().setMaxOutstandingReadRequests(pattern.getMaxOutstandingReadRequests()); @@ -697,9 +739,23 @@ private void followLeaderIndex( request.getParameters().setReadPollTimeout(pattern.getReadPollTimeout()); request.masterNodeTimeout(TimeValue.MAX_VALUE); + return request; + } + + private void followLeaderIndex( + String autoFollowPattenName, + String remoteClusterString, + Index indexToFollow, + IndexAbstraction indexAbstraction, + AutoFollowPattern pattern, + Map headers, + Consumer onResult + ) { + PutFollowAction.Request request = generateRequest(remoteClusterString, indexToFollow, indexAbstraction, pattern); + // Execute if the create and follow api call succeeds: Runnable successHandler = () -> { - LOGGER.info("auto followed leader index [{}] as follow index [{}]", indexToFollow, followIndexName); + LOGGER.info("auto followed leader index [{}] as follow index [{}]", indexToFollow, request.getFollowerIndex()); // This function updates the auto follow metadata in the cluster to record that the leader index has been followed: // (so that we do not try to follow it in subsequent auto follow runs) @@ -731,6 +787,22 @@ private void finalise(int slot, AutoFollowResult result, final Thread thread) { } } + /** + * Given an auto following pattern for a set of indices and the cluster state from a remote + * cluster, return the list of indices that need to be followed. The list of followed index + * UUIDs contains indices that have already been followed, so the returned list will only + * contain "new" indices from the leader that need to be followed. + * + * When looking up the name of the index to see if it matches one of the patterns, the index + * abstraction ({@link IndexAbstraction}) of the index is used for comparison, this means + * that if an index named ".ds-foo" was part of a data stream "foo", then an auto-follow + * pattern of "f*" would allow the ".ds-foo" index to be returned. + * + * @param autoFollowPattern pattern to check indices that may need to be followed + * @param remoteClusterState state from the remote ES cluster + * @param followedIndexUUIDs a collection of UUIDs of indices already being followed + * @return any new indices on the leader that need to be followed + */ static List getLeaderIndicesToFollow( AutoFollowPattern autoFollowPattern, ClusterState remoteClusterState, @@ -760,9 +832,45 @@ static List getLeaderIndicesToFollow( return leaderIndicesToFollow; } + /** + * Returns the new name for the follower index. If the auto-follow configuration includes a + * follow index pattern, the text "{@code {{leader_index}}}" is replaced with the original + * index name, so a leader index called "foo" and a pattern of "{{leader_index}}_copy" + * becomes a new follower index called "foo_copy". 
+ */ static String getFollowerIndexName(AutoFollowPattern autoFollowPattern, String leaderIndexName) { - if (autoFollowPattern.getFollowIndexPattern() != null) { - return autoFollowPattern.getFollowIndexPattern().replace("{{leader_index}}", leaderIndexName); + final String followPattern = autoFollowPattern.getFollowIndexPattern(); + if (followPattern != null) { + if (leaderIndexName.contains(DataStream.BACKING_INDEX_PREFIX)) { + // The index being replicated is a data stream backing index, so it's something + // like: .ds--20XX-mm-dd-NNNNNN + // + // However, we cannot just replace the name with the proposed follow index + // pattern, or else we'll end up with something like ".ds-logs-foo-bar-2022-02-02-000001_copy" + // for "{{leader_index}}_copy", which will cause problems because it doesn't + // follow a parseable pattern. Instead it would be better to rename it as though + // the data stream name was the leader index name, ending up with + // ".ds-logs-foo-bar_copy-2022-02-02-000001" as the final index name. + Matcher m = DS_BACKING_PATTERN.matcher(leaderIndexName); + if (m.find()) { + return m.group(1) + // Prefix including ".ds-" + followPattern.replace(AUTO_FOLLOW_PATTERN_REPLACEMENT, m.group(2)) + // Data stream name changed + "-" + // Hyphen separator + m.group(3) + // Date math + m.group(4); + } else { + throw new IllegalArgumentException( + "unable to determine follower index name from leader index name [" + + leaderIndexName + + "] and follow index pattern: [" + + followPattern + + "], index appears to follow a regular data stream backing pattern, but could not be parsed" + ); + } + } else { + // If the index does nat contain a `.ds-`, then rename it as usual. + return followPattern.replace("{{leader_index}}", leaderIndexName); + } } else { return leaderIndexName; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 88301c49c2101..b95e03eb09f58 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -169,17 +170,6 @@ private void createFollowerIndex( return; } - if (remoteDataStream != null) { - // when following a backing index then the names of the backing index must be remain the same in the local - // and remote cluster. 
- if (request.getLeaderIndex().equals(request.getFollowerIndex()) == false) { - listener.onFailure( - new IllegalArgumentException("a backing index name in the local and remote cluster must remain the same") - ); - return; - } - } - final Settings overrideSettings = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, request.getFollowerIndex()) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) @@ -215,15 +205,37 @@ protected void doRun() { (delegatedListener, response) -> afterRestoreStarted(clientWithHeaders, request, delegatedListener, response) ); if (remoteDataStream == null) { + // If the index we're following is not part of a data stream, start the + // restoration of the index normally. restoreService.restoreSnapshot(restoreRequest, delegatelistener); } else { String followerIndexName = request.getFollowerIndex(); + // This method is used to update the metadata in the same cluster state + // update as the snapshot is restored. BiConsumer updater = (currentState, mdBuilder) -> { - DataStream localDataStream = mdBuilder.dataStreamMetadata().dataStreams().get(remoteDataStream.getName()); - Index followerIndex = mdBuilder.get(followerIndexName).getIndex(); - assert followerIndex != null; + final String localDataStreamName; + + // If we have been given a data stream name, use that name for the local + // data stream. See the javadoc for AUTO_FOLLOW_PATTERN_REPLACEMENT + // for more info. + final String dsName = request.getDataStreamName(); + if (Strings.hasText(dsName)) { + localDataStreamName = dsName; + } else { + // There was no specified name, use the original data stream name. + localDataStreamName = remoteDataStream.getName(); + } + final DataStream localDataStream = mdBuilder.dataStreamMetadata().dataStreams().get(localDataStreamName); + final Index followerIndex = mdBuilder.get(followerIndexName).getIndex(); + assert followerIndex != null + : "expected followerIndex " + followerIndexName + " to exist in the state, but it did not"; - DataStream updatedDataStream = updateLocalDataStream(followerIndex, localDataStream, remoteDataStream); + final DataStream updatedDataStream = updateLocalDataStream( + followerIndex, + localDataStream, + localDataStreamName, + remoteDataStream + ); mdBuilder.put(updatedDataStream); }; restoreService.restoreSnapshot(restoreRequest, delegatelistener, updater); @@ -303,12 +315,23 @@ private void initiateFollowing( ); } - static DataStream updateLocalDataStream(Index backingIndexToFollow, DataStream localDataStream, DataStream remoteDataStream) { + /** + * Given the backing index that the follower is going to follow, the local data stream (if it + * exists) and the remote data stream, return the new local data stream for the local cluster + * (the follower) updated with whichever information is necessary to restore the new + * soon-to-be-followed index. + */ + static DataStream updateLocalDataStream( + Index backingIndexToFollow, + DataStream localDataStream, + String localDataStreamName, + DataStream remoteDataStream + ) { if (localDataStream == null) { // The data stream and the backing indices have been created and validated in the remote cluster, // just copying the data stream is in this case safe. 
return new DataStream( - remoteDataStream.getName(), + localDataStreamName, List.of(backingIndexToFollow), remoteDataStream.getGeneration(), remoteDataStream.getMetadata(), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 825c2abeb95ac..f8cca99ce5e8e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -32,6 +33,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -74,6 +76,7 @@ import static org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower.recordLeaderIndexAsFollowFunction; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -1001,6 +1004,331 @@ public void testGetFollowerIndexName() { null ); assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); + + // Test that index of data stream type name works correctly: + autoFollowPattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + assertThat( + AutoFollower.getFollowerIndexName(autoFollowPattern, ".ds-logs-foo-bar-2022-02-01-123456"), + equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456") + ); + + autoFollowPattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "prepend_{{leader_index}}", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + assertThat( + AutoFollower.getFollowerIndexName(autoFollowPattern, ".ds-logs-foo-bar-2022-02-01-123456"), + equalTo(".ds-prepend_logs-foo-bar-2022-02-01-123456") + ); + + } + + public void testGenerateRequest() { + // Renaming with a suffix and normal pattern backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, 
Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo("logs-foo-bar_copy")); + } + + // Renaming with a prefix and normal pattern backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "copy_{{leader_index}}", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-copy_logs-foo-bar-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo("copy_logs-foo-bar")); + } + + // Renaming with a suffix and irregular pattern backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index("my-backing-index", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo("my-backing-index_copy")); + assertThat(request.getLeaderIndex(), equalTo("my-backing-index")); + assertThat(request.getDataStreamName(), equalTo("logs-foo-bar_copy")); + } + + // Renaming with a suffix but not part of a data stream + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + 
Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + null + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo(null)); + } + + // Regular backing index, but no renaming + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + null, + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo(null)); + } + + // Renaming with a suffix and just the worst named backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index("my-.ds-backing-index", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> AutoFollower.generateRequest("remote", index, indexAbstraction, pattern) + ); + assertThat( + e.getMessage(), + containsString( + "unable to determine follower index name from leader index name " + + "[my-.ds-backing-index] 
and follow index pattern: [{{leader_index}}_copy]" + + ", index appears to follow a regular data stream backing pattern, but could not be parsed" + ) + ); + } } public void testStats() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java index fd92bc3ecff99..93879f2dfb842 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java @@ -38,6 +38,11 @@ protected Writeable.Reader instanceReader() { return FollowParameters::new; } + @Override + protected FollowParameters mutateInstance(FollowParameters instance) { + return randomInstance(); + } + static FollowParameters randomInstance() { FollowParameters followParameters = new FollowParameters(); followParameters.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java index 50fe5ce87182e..ab84ca9fd9ca7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -38,6 +39,7 @@ protected PutFollowAction.Request createTestInstance() { Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 4)).build() ); ResumeFollowActionRequestTests.generateFollowParameters(request.getParameters()); + request.setDataStreamName(randomAlphaOfLength(4)); return request; } @@ -53,6 +55,7 @@ protected PutFollowAction.Request createXContextTestInstance(XContentType xConte ); request.setFollowerIndex("followerIndex"); ResumeFollowActionRequestTests.generateFollowParameters(request.getParameters()); + request.setDataStreamName(randomAlphaOfLength(4)); return request; } @@ -61,6 +64,40 @@ protected PutFollowAction.Request doParseInstance(XContentParser parser) throws return PutFollowAction.Request.fromXContent(parser, "followerIndex", ActiveShardCount.DEFAULT); } + @Override + protected PutFollowAction.Request mutateInstance(PutFollowAction.Request instance) throws IOException { + PutFollowAction.Request request = new PutFollowAction.Request(); + request.setFollowerIndex(instance.getFollowerIndex()); + request.waitForActiveShards(instance.waitForActiveShards()); + request.setRemoteCluster(instance.getRemoteCluster()); + request.setLeaderIndex(instance.getLeaderIndex()); + request.setSettings(instance.getSettings()); + request.setParameters(instance.getParameters()); + request.setDataStreamName(instance.getDataStreamName()); + + switch (randomIntBetween(0, 6)) { + case 0 -> request.setFollowerIndex(randomAlphaOfLength(5)); + case 1 -> request.waitForActiveShards(new ActiveShardCount(randomIntBetween(3, 5))); + case 2 -> request.setRemoteCluster(randomAlphaOfLength(5)); + 
case 3 -> request.setLeaderIndex(randomAlphaOfLength(5)); + case 4 -> request.setSettings( + Settings.builder() + .put( + IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), + randomValueOtherThan( + IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(request.getSettings()), + ESTestCase::randomInt + ) + ) + .build() + ); + case 5 -> request.setParameters(FollowParametersTests.randomInstance()); + case 6 -> request.setDataStreamName(randomAlphaOfLength(5)); + default -> throw new AssertionError("failed branch"); + } + return request; + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java index 955623bdda743..61050b4172119 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java @@ -24,7 +24,12 @@ public class TransportPutFollowActionTests extends ESTestCase { public void testCreateNewLocalDataStream() { DataStream remoteDataStream = generateDataSteam("logs-foobar", 3, false); Index backingIndexToFollow = remoteDataStream.getIndices().get(remoteDataStream.getIndices().size() - 1); - DataStream result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, null, remoteDataStream); + DataStream result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + null, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); @@ -36,7 +41,12 @@ public void testUpdateLocalDataStream_followNewBackingIndex() { DataStream remoteDataStream = generateDataSteam("logs-foobar", 3, false); DataStream localDataStream = generateDataSteam("logs-foobar", 2, true); Index backingIndexToFollow = remoteDataStream.getIndices().get(remoteDataStream.getIndices().size() - 1); - DataStream result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, localDataStream, remoteDataStream); + DataStream result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + localDataStream, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); @@ -51,7 +61,12 @@ public void testUpdateLocalDataStream_followOlderBackingIndex() { DataStream remoteDataStream = generateDataSteam("logs-foobar", 5, false); DataStream localDataStream = generateDataSteam("logs-foobar", 5, true, DataStream.getDefaultBackingIndexName("logs-foobar", 5)); Index backingIndexToFollow = remoteDataStream.getIndices().get(0); - DataStream result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, localDataStream, remoteDataStream); + DataStream result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + localDataStream, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), 
equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); @@ -62,7 +77,12 @@ public void testUpdateLocalDataStream_followOlderBackingIndex() { // follow second last backing index: localDataStream = result; backingIndexToFollow = remoteDataStream.getIndices().get(remoteDataStream.getIndices().size() - 2); - result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, localDataStream, remoteDataStream); + result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + localDataStream, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 910cf956c5dac..1b340a27bac2e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -15,9 +15,11 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -43,6 +45,7 @@ public static class Request extends AcknowledgedRequest implements Indi private static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); private static final ParseField SETTINGS_FIELD = new ParseField("settings"); + private static final ParseField DATA_STREAM_NAME = new ParseField("data_stream_name"); // Note that Request should be the Value class here for this parser with a 'parameters' field that maps to // PutFollowParameters class. 
But since two minor version are already released with duplicate follow parameters @@ -52,6 +55,7 @@ public static class Request extends AcknowledgedRequest implements Indi static { PARSER.declareString((putFollowParameters, value) -> putFollowParameters.remoteCluster = value, REMOTE_CLUSTER_FIELD); PARSER.declareString((putFollowParameters, value) -> putFollowParameters.leaderIndex = value, LEADER_INDEX_FIELD); + PARSER.declareString((putFollowParameters, value) -> putFollowParameters.dataStreamName = value, DATA_STREAM_NAME); PARSER.declareObject( (putFollowParameters, value) -> putFollowParameters.settings = value, (p, c) -> Settings.fromXContent(p), @@ -69,6 +73,7 @@ public static Request fromXContent(final XContentParser parser, final String fol request.setFollowerIndex(followerIndex); request.setRemoteCluster(parameters.remoteCluster); request.setLeaderIndex(parameters.leaderIndex); + request.setDataStreamName(parameters.dataStreamName); request.setSettings(parameters.settings); request.setParameters(parameters); return request; @@ -76,8 +81,10 @@ public static Request fromXContent(final XContentParser parser, final String fol private String remoteCluster; private String leaderIndex; - private Settings settings = Settings.EMPTY; private String followerIndex; + @Nullable + private String dataStreamName; + private Settings settings = Settings.EMPTY; private FollowParameters parameters = new FollowParameters(); private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE; @@ -123,6 +130,15 @@ public void setParameters(FollowParameters parameters) { this.parameters = parameters; } + @Nullable + public String getDataStreamName() { + return dataStreamName; + } + + public void setDataStreamName(String dataStreamName) { + this.dataStreamName = dataStreamName; + } + public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } @@ -156,6 +172,9 @@ public ActionRequestValidationException validate() { if (followerIndex == null) { e = addValidationError("follower_index is missing", e); } + if (dataStreamName != null && Strings.hasText(dataStreamName) == false) { + e = addValidationError("data stream name must contain text if present", e); + } return e; } @@ -179,6 +198,9 @@ public Request(StreamInput in) throws IOException { } this.parameters = new FollowParameters(in); waitForActiveShards(ActiveShardCount.readFrom(in)); + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + this.dataStreamName = in.readOptionalString(); + } } @Override @@ -192,6 +214,9 @@ public void writeTo(StreamOutput out) throws IOException { } parameters.writeTo(out); waitForActiveShards.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeOptionalString(this.dataStreamName); + } } @Override @@ -200,6 +225,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws { builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster); builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); + if (dataStreamName != null) { + builder.field(DATA_STREAM_NAME.getPreferredName(), dataStreamName); + } if (settings.isEmpty() == false) { builder.startObject(SETTINGS_FIELD.getPreferredName()); { @@ -222,12 +250,14 @@ public boolean equals(Object o) { && Objects.equals(leaderIndex, request.leaderIndex) && Objects.equals(followerIndex, request.followerIndex) && Objects.equals(parameters, request.parameters) - && Objects.equals(waitForActiveShards, request.waitForActiveShards); + && Objects.equals(waitForActiveShards, request.waitForActiveShards) + && 
Objects.equals(dataStreamName, request.dataStreamName) + && Objects.equals(settings, request.settings); } @Override public int hashCode() { - return Objects.hash(remoteCluster, leaderIndex, followerIndex, parameters, waitForActiveShards); + return Objects.hash(remoteCluster, leaderIndex, followerIndex, parameters, settings, waitForActiveShards, dataStreamName); } // This class only exists for reuse of the FollowParameters class, see comment above the parser field. @@ -235,6 +265,7 @@ private static class PutFollowParameters extends FollowParameters { private String remoteCluster; private String leaderIndex; + private String dataStreamName; private Settings settings = Settings.EMPTY; } From 5194d29b1c49524cfa94a33ff798fa3a3bc160d9 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 1 Aug 2022 08:23:36 -0700 Subject: [PATCH 034/265] Support source fallback for byte, short, and long fields (#88954) This change adds source fallback support for byte, short, and long fields. These use the already existing class SourceValueFetcherSortedNumericIndexFieldData. --- docs/changelog/88954.yaml | 5 + .../test/painless/50_script_doc_values.yml | 374 +++++++++++++++++- .../index/mapper/NumberFieldMapper.java | 45 +++ 3 files changed, 420 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/88954.yaml diff --git a/docs/changelog/88954.yaml b/docs/changelog/88954.yaml new file mode 100644 index 0000000000000..9452fd90f0f82 --- /dev/null +++ b/docs/changelog/88954.yaml @@ -0,0 +1,5 @@ +pr: 88954 +summary: "Support source fallback for byte, short, and long fields" +area: Mapping +type: enhancement +issues: [] diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 4ba1b270cd995..761787365d38e 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -16,8 +16,8 @@ setup: geo_point: type: geo_point geo_point_no_doc_values: - type: geo_point - doc_values: false + type: geo_point + doc_values: false ip: type: ip keyword: @@ -27,15 +27,24 @@ setup: doc_values: false long: type: long + long_no_doc_values: + type: long + doc_values: false integer: type: integer integer_no_doc_values: - type: integer - doc_values: false + type: integer + doc_values: false short: type: short + short_no_doc_values: + type: short + doc_values: false byte: type: byte + byte_no_doc_values: + type: byte + doc_values: false double: type: double float: @@ -69,10 +78,13 @@ setup: keyword: not split at all keyword_no_doc_values: no doc values long: 12348732141234 + long_no_doc_values: 12348732141234 integer: 134134566 integer_no_doc_values: 134134566 short: 1324 + short_no_doc_values: 1324 byte: 12 + byte_no_doc_values: 12 double: 3.14159265358979 float: 3.141592654 half_float: 3.140625 @@ -101,10 +113,13 @@ setup: keyword: ["one string", "another string"] keyword_no_doc_values: ["no doc values 1", "no doc values 0", "no doc values 2"] long: [1152921504606846976, 576460752303423488] + long_no_doc_values: [576460752303423488, 1152921504606846976, -1152921504606846976] integer: [5, 17, 29] integer_no_doc_values: [17, 29, 5] short: [6, 18, 30, 45] + short_no_doc_values: [30, 45, 18, 6] byte: [16, 32, 64, 8, 4] + byte_no_doc_values: [16, 8, 32, 4, 64] double: [3.141592653588, 2.141592653587] float: 
[1.123, 2.234] half_float: [1.123, 2.234] @@ -1081,6 +1096,84 @@ setup: source: "doc['long'].value" - match: { hits.hits.0.fields.field.0: 12348732141234 } +--- +"long_no_doc_values": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['long_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['long_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('long_no_doc_values').get(-1)" + - match: { hits.hits.0.fields.field.0: 12348732141234 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: -1152921504606846976 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('long_no_doc_values', -1)" + - match: { hits.hits.0.fields.field.0: 12348732141234 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: -1152921504606846976 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('long_no_doc_values').get(2, -3)" + - match: { hits.hits.0.fields.field.0: -3 } + - match: { hits.hits.1.fields.field.0: -3 } + - match: { hits.hits.2.fields.field.0: 1152921504606846976 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "long total = 0; for (long i : field('long_no_doc_values')) { total += i; } total + field('long_no_doc_values').size();" + - match: { hits.hits.0.fields.field.0: 12348732141235 } + - match: { hits.hits.1.fields.field.0: 0 } + - match: { hits.hits.2.fields.field.0: 576460752303423491 } + --- "integer": - do: @@ -1342,6 +1435,123 @@ setup: - match: { hits.hits.1.fields.field.0: 0 } - match: { hits.hits.2.fields.field.0: 103 } +--- +"short_no_doc_values": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['short_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['short_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('short_no_doc_values').get(-1)" + - match: { hits.hits.0.fields.field.0: 1324 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: 6 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('short_no_doc_values', -1)" + - match: { hits.hits.0.fields.field.0: 1324 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: 6 } + + - do: 
+ search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "short defaultShort = -1; field('short_no_doc_values').get(defaultShort)" + - match: { hits.hits.0.fields.field.0: 1324 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: 6 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "short defaultShort = -1; $('short_no_doc_values', defaultShort)" + - match: { hits.hits.0.fields.field.0: 1324 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: 6 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('short_no_doc_values').get(1, -3)" + - match: { hits.hits.0.fields.field.0: -3 } + - match: { hits.hits.1.fields.field.0: -3 } + - match: { hits.hits.2.fields.field.0: 18 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "short defaultShort = -3; field('short_no_doc_values').get(1, defaultShort)" + - match: { hits.hits.0.fields.field.0: -3 } + - match: { hits.hits.1.fields.field.0: -3 } + - match: { hits.hits.2.fields.field.0: 18 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "int total = 0; for (short s : field('short_no_doc_values')) { total += s; } total + field('short_no_doc_values').size();" + - match: { hits.hits.0.fields.field.0: 1325 } + - match: { hits.hits.1.fields.field.0: 0 } + - match: { hits.hits.2.fields.field.0: 103 } + --- "byte": - do: @@ -1496,6 +1706,162 @@ setup: - match: { hits.hits.1.fields.field.0: 0 } - match: { hits.hits.2.fields.field.0: 129 } +--- +"byte_no_doc_values": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['byte_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['byte_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('byte_no_doc_values').get((byte) 5)" + - match: { hits.hits.0.fields.field.0: 12 } + - match: { hits.hits.1.fields.field.0: 5 } + - match: { hits.hits.2.fields.field.0: 4 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('byte_no_doc_values', (byte) 5)" + - match: { hits.hits.0.fields.field.0: 12 } + - match: { hits.hits.1.fields.field.0: 5 } + - match: { hits.hits.2.fields.field.0: 4 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "byte defaultByte = 5; field('byte_no_doc_values').get(defaultByte)" + - match: { hits.hits.0.fields.field.0: 12 } + - match: { hits.hits.1.fields.field.0: 5 } + - match: { hits.hits.2.fields.field.0: 4 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: 
"byte defaultByte = 5; $('byte_no_doc_values', defaultByte)" + - match: { hits.hits.0.fields.field.0: 12 } + - match: { hits.hits.1.fields.field.0: 5 } + - match: { hits.hits.2.fields.field.0: 4 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('byte_no_doc_values').get(5)" + - match: { hits.hits.0.fields.field.0: 12 } + - match: { hits.hits.1.fields.field.0: 5 } + - match: { hits.hits.2.fields.field.0: 4 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('byte_no_doc_values', 5)" + - match: { hits.hits.0.fields.field.0: 12 } + - match: { hits.hits.1.fields.field.0: 5 } + - match: { hits.hits.2.fields.field.0: 4 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('byte_no_doc_values').get(1, (byte) 7)" + - match: { hits.hits.0.fields.field.0: 7 } + - match: { hits.hits.1.fields.field.0: 7 } + - match: { hits.hits.2.fields.field.0: 8 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "byte defaultByte = 7; field('byte_no_doc_values').get(1, defaultByte)" + - match: { hits.hits.0.fields.field.0: 7 } + - match: { hits.hits.1.fields.field.0: 7 } + - match: { hits.hits.2.fields.field.0: 8 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('byte_no_doc_values').get(1, 7)" + - match: { hits.hits.0.fields.field.0: 7 } + - match: { hits.hits.1.fields.field.0: 7 } + - match: { hits.hits.2.fields.field.0: 8 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "int total = 0; for (byte s : field('byte_no_doc_values')) { total += s; } total + field('byte_no_doc_values').size();" + - match: { hits.hits.0.fields.field.0: 13 } + - match: { hits.hits.1.fields.field.0: 0 } + - match: { hits.hits.2.fields.field.0: 129 } + --- "double": - do: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 7c4b4384ddb7e..60738a6e2156f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -708,6 +708,21 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder(name, numericType(), ByteDocValuesField::new); } + @Override + public IndexFieldData.Builder getValueFetcherFieldDataBuilder( + String name, + SourceLookup sourceLookup, + ValueFetcher valueFetcher + ) { + return new SourceValueFetcherSortedNumericIndexFieldData.Builder( + name, + numericType().getValuesSourceType(), + valueFetcher, + sourceLookup, + ByteDocValuesField::new + ); + } + @Override SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String fieldSimpleName) { return NumberType.syntheticLongFieldLoader(fieldName, fieldSimpleName); @@ -781,6 +796,21 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder(name, numericType(), ShortDocValuesField::new); } + @Override + public IndexFieldData.Builder getValueFetcherFieldDataBuilder( + String name, + SourceLookup 
sourceLookup, + ValueFetcher valueFetcher + ) { + return new SourceValueFetcherSortedNumericIndexFieldData.Builder( + name, + numericType().getValuesSourceType(), + valueFetcher, + sourceLookup, + ShortDocValuesField::new + ); + } + @Override SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String fieldSimpleName) { return NumberType.syntheticLongFieldLoader(fieldName, fieldSimpleName); @@ -1046,6 +1076,21 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder(name, numericType(), LongDocValuesField::new); } + @Override + public IndexFieldData.Builder getValueFetcherFieldDataBuilder( + String name, + SourceLookup sourceLookup, + ValueFetcher valueFetcher + ) { + return new SourceValueFetcherSortedNumericIndexFieldData.Builder( + name, + numericType().getValuesSourceType(), + valueFetcher, + sourceLookup, + LongDocValuesField::new + ); + } + @Override SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String fieldSimpleName) { return syntheticLongFieldLoader(fieldName, fieldSimpleName); From b7240393c6cb41d5b644250e18db20ab7248cf25 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 1 Aug 2022 17:53:32 +0200 Subject: [PATCH 035/265] Save loop over all local shards in IndicesClusterService.applyClusterState (#88210) We can save another two loops here by checking for shards to fail in the same loop that updates or creates shards. Also, we only need to loop over all indices services locally once for deleting indices as a whole or just shards out of existing indices. --- .../cluster/IndicesClusterStateService.java | 133 +++++++----------- 1 file changed, 54 insertions(+), 79 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 4e67ace32dda6..fc29f0426ef0a 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -219,11 +219,7 @@ public synchronized void applyClusterState(final ClusterChangedEvent event) { deleteIndices(event); // also deletes shards of deleted indices - removeIndices(event); // also removes shards of removed indices - - failMissingShards(state); - - removeShards(state); // removes any local shards that doesn't match what the master expects + removeIndicesAndShards(event); // also removes shards of removed indices updateIndices(event); // can also fail shards, but these are then guaranteed to be in failedShardsCache @@ -357,10 +353,11 @@ protected void doRun() throws Exception { * Removes indices that have no shards allocated to this node or indices whose state has changed. This does not delete the shard data * as we wait for enough shard copies to exist in the cluster before deleting shard data (triggered by * {@link org.elasticsearch.indices.store.IndicesStore}). + * Also removes shards that are currently loaded by indicesService but have disappeared from the routing table of the current node. 
* * @param event the cluster changed event */ - private void removeIndices(final ClusterChangedEvent event) { + private void removeIndicesAndShards(final ClusterChangedEvent event) { final ClusterState state = event.state(); final String localNodeId = state.nodes().getLocalNodeId(); assert localNodeId != null; @@ -390,83 +387,50 @@ private void removeIndices(final ClusterChangedEvent event) { if (reason != null) { logger.debug("{} removing index ({})", index, reason); indicesService.removeIndex(index, reason, "removing index (" + reason + ")"); + } else { + // remove shards based on routing nodes (no deletion of data) + for (Shard shard : indexService) { + ShardRouting currentRoutingEntry = shard.routingEntry(); + ShardId shardId = currentRoutingEntry.shardId(); + ShardRouting newShardRouting = localRoutingNode.getByShardId(shardId); + if (newShardRouting == null) { + // we can just remove the shard without cleaning it locally, since we will clean it in IndicesStore + // once all shards are allocated + logger.debug("{} removing shard (not allocated)", shardId); + indexService.removeShard(shardId.id(), "removing shard (not allocated)"); + } else if (newShardRouting.isSameAllocation(currentRoutingEntry) == false) { + logger.debug( + "{} removing shard (stale allocation id, stale {}, new {})", + shardId, + currentRoutingEntry, + newShardRouting + ); + indexService.removeShard(shardId.id(), "removing shard (stale copy)"); + } else if (newShardRouting.initializing() && currentRoutingEntry.active()) { + // this can happen if the node was isolated/gc-ed, rejoins the cluster and a new shard with the same allocation id + // is assigned to it. Batch cluster state processing or if shard fetching completes before the node gets a new + // cluster state may result in a new shard being initialized while having the same allocation id as the currently + // started shard. + logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); + indexService.removeShard(shardId.id(), "removing shard (stale copy)"); + } else if (newShardRouting.primary() && currentRoutingEntry.primary() == false && newShardRouting.initializing()) { + assert currentRoutingEntry.initializing() : currentRoutingEntry; // see above if clause + // this can happen when cluster state batching batches activation of the shard, closing an index, reopening it + // and assigning an initializing primary to this node + logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); + indexService.removeShard(shardId.id(), "removing shard (stale copy)"); + } + } } } } /** - * Notifies master about shards that don't exist but are supposed to be active on this node. 
- * - * @param state new cluster state - */ - private void failMissingShards(final ClusterState state) { - RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); - if (localRoutingNode == null) { - return; - } - for (final ShardRouting shardRouting : localRoutingNode) { - ShardId shardId = shardRouting.shardId(); - if (shardRouting.initializing() == false - && failedShardsCache.containsKey(shardId) == false - && indicesService.getShardOrNull(shardId) == null) { - // the master thinks we are active, but we don't have this shard at all, mark it as failed - sendFailShard( - shardRouting, - "master marked shard as active, but shard has not been created, mark shard as failed", - null, - state - ); - } - } - } - - /** - * Removes shards that are currently loaded by indicesService but have disappeared from the routing table of the current node. - * This method does not delete the shard data. + * Notifies master about shards that don't exist but are supposed to be active on this node, creates new shards that are supposed to + * be initializing on this node and if needed updates the state of existing shards with the new cluster state. * * @param state new cluster state */ - private void removeShards(final ClusterState state) { - final String localNodeId = state.nodes().getLocalNodeId(); - assert localNodeId != null; - - // remove shards based on routing nodes (no deletion of data) - RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId); - for (AllocatedIndex indexService : indicesService) { - for (Shard shard : indexService) { - ShardRouting currentRoutingEntry = shard.routingEntry(); - ShardId shardId = currentRoutingEntry.shardId(); - ShardRouting newShardRouting = localRoutingNode == null ? null : localRoutingNode.getByShardId(shardId); - if (newShardRouting == null) { - // we can just remove the shard without cleaning it locally, since we will clean it in IndicesStore - // once all shards are allocated - logger.debug("{} removing shard (not allocated)", shardId); - indexService.removeShard(shardId.id(), "removing shard (not allocated)"); - } else if (newShardRouting.isSameAllocation(currentRoutingEntry) == false) { - logger.debug( - "{} removing shard (stale allocation id, stale {}, new {})", - shardId, - currentRoutingEntry, - newShardRouting - ); - indexService.removeShard(shardId.id(), "removing shard (stale copy)"); - } else if (newShardRouting.initializing() && currentRoutingEntry.active()) { - // this can happen if the node was isolated/gc-ed, rejoins the cluster and a new shard with the same allocation id - // is assigned to it. Batch cluster state processing or if shard fetching completes before the node gets a new cluster - // state may result in a new shard being initialized while having the same allocation id as the currently started shard. 
- logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); - indexService.removeShard(shardId.id(), "removing shard (stale copy)"); - } else if (newShardRouting.primary() && currentRoutingEntry.primary() == false && newShardRouting.initializing()) { - assert currentRoutingEntry.initializing() : currentRoutingEntry; // see above if clause - // this can happen when cluster state batching batches activation of the shard, closing an index, reopening it - // and assigning an initializing primary to this node - logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); - indexService.removeShard(shardId.id(), "removing shard (stale copy)"); - } - } - } - } - private void createIndicesAndUpdateShards(final ClusterState state) { DiscoveryNodes nodes = state.nodes(); RoutingNode localRoutingNode = state.getRoutingNodes().node(nodes.getLocalNodeId()); @@ -480,13 +444,24 @@ private void createIndicesAndUpdateShards(final ClusterState state) { // service is found final Map> indicesToCreate = new HashMap<>(); for (ShardRouting shardRouting : localRoutingNode) { - if (failedShardsCache.containsKey(shardRouting.shardId()) == false) { + ShardId shardId = shardRouting.shardId(); + if (failedShardsCache.containsKey(shardId) == false) { final Index index = shardRouting.index(); final var indexService = indicesService.indexService(index); - if (indexService == null) { - indicesToCreate.computeIfAbsent(index, k -> new ArrayList<>()).add(shardRouting); + if (shardRouting.initializing() == false && (indexService == null || indexService.getShardOrNull(shardId.id()) == null)) { + // the master thinks we are active, but we don't have this shard at all, mark it as failed + sendFailShard( + shardRouting, + "master marked shard as active, but shard has not been created, mark shard as failed", + null, + state + ); } else { - createOrUpdateShard(state, nodes, routingTable, shardRouting, indexService); + if (indexService == null) { + indicesToCreate.computeIfAbsent(index, k -> new ArrayList<>()).add(shardRouting); + } else { + createOrUpdateShard(state, nodes, routingTable, shardRouting, indexService); + } } } } From 6121a8ad5d111b84e7c2596864e3e036dad4fc58 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 1 Aug 2022 10:30:50 -0600 Subject: [PATCH 036/265] Disable BWC tests for backport of #88875 (#88997) --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 4d84dcbdb7ffd..cdcf6815c0bec 100644 --- a/build.gradle +++ b/build.gradle @@ -136,9 +136,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. 
*/ -boolean bwc_tests_enabled = true +boolean bwc_tests_enabled = false // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "" +String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/88995" if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From ad2dc834a72265a76b1f329c8174b41d64047bf5 Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> Date: Mon, 1 Aug 2022 20:42:25 +0300 Subject: [PATCH 037/265] Add `synthetic_source` support to `aggregate_metric_double` fields (#88909) This PR implements synthetic_source support to the aggregate_metric_double field type Relates to #86603 --- docs/changelog/88909.yaml | 5 + .../mapping/fields/synthetic-source.asciidoc | 1 + .../types/aggregate-metric-double.asciidoc | 52 ++++++++++ .../index/mapper/NumberFieldMapper.java | 8 +- .../index/mapper/MapperTestCase.java | 27 ++++-- .../AggregateDoubleMetricFieldMapper.java | 94 ++++++++++++++++++- ...AggregateDoubleMetricFieldMapperTests.java | 64 +++++++++++-- .../100_synthetic_source.yml | 53 +++++++++++ 8 files changed, 284 insertions(+), 20 deletions(-) create mode 100644 docs/changelog/88909.yaml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml diff --git a/docs/changelog/88909.yaml b/docs/changelog/88909.yaml new file mode 100644 index 0000000000000..231871183862d --- /dev/null +++ b/docs/changelog/88909.yaml @@ -0,0 +1,5 @@ +pr: 88909 +summary: Add `synthetic_source` support to `aggregate_metric_double` fields +area: Mapping +type: enhancement +issues: [] diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index 875800f8d86c7..32731423a4691 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -28,6 +28,7 @@ space. There are a couple of restrictions to be aware of: * Synthetic `_source` can be used with indices that contain only these field types: +** <> ** <> ** <> ** <> diff --git a/docs/reference/mapping/types/aggregate-metric-double.asciidoc b/docs/reference/mapping/types/aggregate-metric-double.asciidoc index d6955186a4779..61b4adf2fd029 100644 --- a/docs/reference/mapping/types/aggregate-metric-double.asciidoc +++ b/docs/reference/mapping/types/aggregate-metric-double.asciidoc @@ -251,3 +251,55 @@ The search returns the following hit. The value of the `default_metric` field, } ---- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,/] + +ifeval::["{release-state}"=="unreleased"] +[[aggregate-metric-double-synthetic-source]] +==== Synthetic source +`aggregate_metric-double` fields support <> in their default +configuration. Synthetic `_source` cannot be used together with <>. 
+ +For example: +[source,console,id=synthetic-source-aggregate-metric-double-example] +---- +PUT idx +{ + "mappings": { + "_source": { "mode": "synthetic" }, + "properties": { + "agg_metric": { + "type": "aggregate_metric_double", + "metrics": [ "min", "max", "sum", "value_count" ], + "default_metric": "max" + } + } + } +} + +PUT idx/_doc/1 +{ + "agg_metric": { + "min": -302.50, + "max": 702.30, + "sum": 200.0, + "value_count": 25 + } +} +---- +// TEST[s/$/\nGET idx\/_doc\/1?filter_path=_source\n/] + +Will become: + +[source,console-result] +---- +{ + "agg_metric": { + "min": -302.50, + "max": 702.30, + "sum": 200.0, + "value_count": 25 + } +} +---- +// TEST[s/^/{"_source":/ s/\n$/}/] + +endif::[] diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 60738a6e2156f..9890e08568784 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1719,7 +1719,7 @@ protected NumericSyntheticFieldLoader(String name, String simpleName) { @Override public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { - SortedNumericDocValues dv = dv(reader); + SortedNumericDocValues dv = docValuesOrNull(reader, name); if (dv == null) { return SourceLoader.SyntheticFieldLoader.NOTHING_LEAF; } @@ -1830,12 +1830,12 @@ public void write(XContentBuilder b) throws IOException { * an "empty" implementation if there aren't any doc values. We need to be able to * tell if there aren't any and return our empty leaf source loader. */ - private SortedNumericDocValues dv(LeafReader reader) throws IOException { - SortedNumericDocValues dv = reader.getSortedNumericDocValues(name); + public static SortedNumericDocValues docValuesOrNull(LeafReader reader, String fieldName) throws IOException { + SortedNumericDocValues dv = reader.getSortedNumericDocValues(fieldName); if (dv != null) { return dv; } - NumericDocValues single = reader.getNumericDocValues(name); + NumericDocValues single = reader.getNumericDocValues(fieldName); if (single != null) { return DocValues.singleton(single); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 387fa339e65c4..231eaff76d14e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -253,6 +253,13 @@ protected boolean supportsMeta() { return true; } + /** + * Override to disable testing {@code copy_to} in fields that don't support it. 
+ */ + protected boolean supportsCopyTo() { + return true; + } + protected void metaMapping(XContentBuilder b) throws IOException { minimalMapping(b); } @@ -893,15 +900,17 @@ public final void testSyntheticEmptyList() throws IOException { public final void testSyntheticSourceInvalid() throws IOException { List examples = new ArrayList<>(syntheticSourceSupport().invalidExample()); - examples.add( - new SyntheticSourceInvalidExample( - matchesPattern("field \\[field] of type \\[.+] doesn't support synthetic source because it declares copy_to"), - b -> { - syntheticSourceSupport().example(5).mapping().accept(b); - b.field("copy_to", "bar"); - } - ) - ); + if (supportsCopyTo()) { + examples.add( + new SyntheticSourceInvalidExample( + matchesPattern("field \\[field] of type \\[.+] doesn't support synthetic source because it declares copy_to"), + b -> { + syntheticSourceSupport().example(5).mapping().accept(b); + b.field("copy_to", "bar"); + } + ) + ); + } for (SyntheticSourceInvalidExample example : examples) { Exception e = expectThrows( IllegalArgumentException.class, diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index f8f9bd9f61483..481f841e88904 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -9,6 +9,7 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.Query; @@ -32,6 +33,7 @@ import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.SimpleMappedFieldType; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.TimeSeriesParams; @@ -574,7 +576,6 @@ public Iterator iterator() { @Override protected void parseCreateField(DocumentParserContext context) throws IOException { - context.path().add(simpleName()); XContentParser.Token token; XContentSubParser subParser = null; @@ -675,4 +676,95 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio public FieldMapper.Builder getMergeBuilder() { return new Builder(simpleName(), ignoreMalformedByDefault, indexCreatedVersion).metric(metricType).init(this); } + + @Override + public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { + if (ignoreMalformed) { + throw new IllegalArgumentException( + "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it ignores malformed numbers" + ); + } + return new AggregateMetricSyntheticFieldLoader(name(), simpleName(), metrics); + } + + public static class AggregateMetricSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private final String name; + private final String simpleName; + private final EnumSet metrics; + + protected 
AggregateMetricSyntheticFieldLoader(String name, String simpleName, EnumSet metrics) { + this.name = name; + this.simpleName = simpleName; + this.metrics = metrics; + } + + @Override + public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { + Map metricDocValues = new EnumMap<>(Metric.class); + for (Metric m : metrics) { + String fieldName = subfieldName(name, m); + SortedNumericDocValues dv = NumberFieldMapper.NumericSyntheticFieldLoader.docValuesOrNull(reader, fieldName); + if (dv != null) { + metricDocValues.put(m, dv); + } + } + + if (metricDocValues.isEmpty()) { + return SourceLoader.SyntheticFieldLoader.NOTHING_LEAF; + } + + return new AggregateMetricSyntheticFieldLoader.ImmediateLeaf(metricDocValues); + } + + private class ImmediateLeaf implements Leaf { + private final Map metricDocValues; + private final Set metricHasValue = EnumSet.noneOf(Metric.class); + + ImmediateLeaf(Map metricDocValues) { + assert metricDocValues.isEmpty() == false : "doc_values for metrics cannot be empty"; + this.metricDocValues = metricDocValues; + } + + @Override + public boolean empty() { + return false; + } + + @Override + public boolean advanceToDoc(int docId) throws IOException { + // It is required that all defined metrics must exist. In this case + // it is enough to check for the first docValue. However, in the future + // we may relax the requirement of all metrics existing. In this case + // we should check the doc value for each metric separately + metricHasValue.clear(); + for (Map.Entry e : metricDocValues.entrySet()) { + if (e.getValue().advanceExact(docId)) { + metricHasValue.add(e.getKey()); + } + } + + return metricHasValue.isEmpty() == false; + } + + @Override + public void write(XContentBuilder b) throws IOException { + if (metricHasValue.isEmpty()) { + return; + } + b.startObject(simpleName); + for (Map.Entry entry : metricDocValues.entrySet()) { + if (metricHasValue.contains(entry.getKey())) { + String metricName = entry.getKey().name(); + long value = entry.getValue().nextValue(); + if (entry.getKey() == Metric.value_count) { + b.field(metricName, value); + } else { + b.field(metricName, NumericUtils.sortableLongToDouble(value)); + } + } + } + b.endObject(); + } + } + } } diff --git a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java index 904effcd6283e..35870fcd7307c 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java @@ -20,12 +20,16 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; +import org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper.Metric; import org.hamcrest.Matchers; import org.junit.AssumptionViolatedException; import java.io.IOException; +import java.util.Arrays; import java.util.Collection; +import java.util.EnumSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -33,6 +37,7 @@ import static 
org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper.Names.METRICS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.matchesPattern; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -393,7 +398,7 @@ public void testExplicitDefaultMetric() throws Exception { Mapper fieldMapper = mapper.mappers().getMapper("field"); assertThat(fieldMapper, instanceOf(AggregateDoubleMetricFieldMapper.class)); - assertEquals(AggregateDoubleMetricFieldMapper.Metric.sum, ((AggregateDoubleMetricFieldMapper) fieldMapper).defaultMetric()); + assertEquals(Metric.sum, ((AggregateDoubleMetricFieldMapper) fieldMapper).defaultMetric()); } /** @@ -406,7 +411,7 @@ public void testImplicitDefaultMetricSingleMetric() throws Exception { Mapper fieldMapper = mapper.mappers().getMapper("field"); assertThat(fieldMapper, instanceOf(AggregateDoubleMetricFieldMapper.class)); - assertEquals(AggregateDoubleMetricFieldMapper.Metric.value_count, ((AggregateDoubleMetricFieldMapper) fieldMapper).defaultMetric); + assertEquals(Metric.value_count, ((AggregateDoubleMetricFieldMapper) fieldMapper).defaultMetric); } /** @@ -416,7 +421,7 @@ public void testImplicitDefaultMetric() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); Mapper fieldMapper = mapper.mappers().getMapper("field"); assertThat(fieldMapper, instanceOf(AggregateDoubleMetricFieldMapper.class)); - assertEquals(AggregateDoubleMetricFieldMapper.Metric.max, ((AggregateDoubleMetricFieldMapper) fieldMapper).defaultMetric); + assertEquals(Metric.max, ((AggregateDoubleMetricFieldMapper) fieldMapper).defaultMetric); } /** @@ -505,8 +510,8 @@ public void testParseNestedValue() throws Exception { * subfields of aggregate_metric_double should not be searchable or exposed in field_caps */ public void testNoSubFieldsIterated() throws IOException { - AggregateDoubleMetricFieldMapper.Metric[] values = AggregateDoubleMetricFieldMapper.Metric.values(); - List subset = randomSubsetOf(randomIntBetween(1, values.length), values); + Metric[] values = Metric.values(); + List subset = randomSubsetOf(randomIntBetween(1, values.length), values); DocumentMapper mapper = createDocumentMapper( fieldMapping(b -> b.field("type", CONTENT_TYPE).field(METRICS_FIELD, subset).field(DEFAULT_METRIC, subset.get(0))) ); @@ -589,11 +594,58 @@ public void testMetricType() throws IOException { @Override protected SyntheticSourceSupport syntheticSourceSupport() { - throw new AssumptionViolatedException("not supported"); + return new AggregateDoubleMetricSyntheticSourceSupport(); } @Override protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + + protected final class AggregateDoubleMetricSyntheticSourceSupport implements SyntheticSourceSupport { + + private final EnumSet storedMetrics = EnumSet.copyOf(randomNonEmptySubsetOf(Arrays.asList(Metric.values()))); + + @Override + public SyntheticSourceExample example(int maxVals) { + // aggregate_metric_double field does not support arrays + Map value = randomAggregateMetric(); + return new SyntheticSourceExample(value, value, this::mapping); + } + + private Map randomAggregateMetric() { + Map value = new LinkedHashMap<>(storedMetrics.size()); + for (Metric m : storedMetrics) { + if (Metric.value_count == m) { + value.put(m.name(), 
randomLongBetween(1, 1_000_000)); + } else { + value.put(m.name(), randomDouble()); + } + } + return value; + } + + private void mapping(XContentBuilder b) throws IOException { + String[] metrics = storedMetrics.stream().map(Metric::toString).toArray(String[]::new); + b.field("type", CONTENT_TYPE).array(METRICS_FIELD, metrics).field(DEFAULT_METRIC, metrics[0]); + } + + @Override + public List invalidExample() throws IOException { + return List.of( + new SyntheticSourceInvalidExample( + matchesPattern("field \\[field] of type \\[.+] doesn't support synthetic source because it ignores malformed numbers"), + b -> { + mapping(b); + b.field("ignore_malformed", true); + } + ) + ); + } + } + + @Override + protected boolean supportsCopyTo() { + return false; + } } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml new file mode 100644 index 0000000000000..3e6ebdaca9f45 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml @@ -0,0 +1,53 @@ +constant_keyword: + - skip: + version: " - 8.4.99" + reason: synthetic source support added in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + metric: + type: aggregate_metric_double + metrics: [min, max, value_count] + default_metric: max + + - do: + index: + index: test + id: "1" + refresh: false # Do not refresh on every insert so that we get both docs in the same segment + body: + metric: + min: 18.2 + max: 100 + value_count: 50 + + - do: + index: + index: test + id: "2" + refresh: true + body: + metric: + min: 10.0 + max: 20.0 + value_count: 5 + + - do: + search: + index: test + body: + query: + ids: + values: [1, 2] + - match: + hits.hits.0._source: + metric: + min: 18.2 + max: 100.0 + value_count: 50 From 857fe2ec8616214b7f3b13b55c0eb4f3ee46bcb8 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 1 Aug 2022 12:08:31 -0600 Subject: [PATCH 038/265] Update version serialization for CCR backport and re-enable BWC tests (#88998) Updates versions for backport (#88995) of original fix (#88875) Relates to #88997 --- build.gradle | 4 ++-- .../elasticsearch/xpack/core/ccr/action/PutFollowAction.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/build.gradle b/build.gradle index cdcf6815c0bec..4d84dcbdb7ffd 100644 --- a/build.gradle +++ b/build.gradle @@ -136,9 +136,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. 
*/ -boolean bwc_tests_enabled = false +boolean bwc_tests_enabled = true // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/88995" +String bwc_tests_disabled_issue = "" if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 1b340a27bac2e..edb4d9420de6d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -198,7 +198,7 @@ public Request(StreamInput in) throws IOException { } this.parameters = new FollowParameters(in); waitForActiveShards(ActiveShardCount.readFrom(in)); - if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + if (in.getVersion().onOrAfter(Version.V_8_4_0)) { this.dataStreamName = in.readOptionalString(); } } @@ -214,7 +214,7 @@ public void writeTo(StreamOutput out) throws IOException { } parameters.writeTo(out); waitForActiveShards.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + if (out.getVersion().onOrAfter(Version.V_8_4_0)) { out.writeOptionalString(this.dataStreamName); } } From 524543e41c1f1f77f4333bdf5853556aff665b44 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Mon, 1 Aug 2022 21:32:59 +0300 Subject: [PATCH 039/265] Extract least/most available disk space DiskUsage (#88996) --- .../org/elasticsearch/cluster/DiskUsage.java | 116 ++++++++++++++ .../cluster/InternalClusterInfoService.java | 85 +--------- .../elasticsearch/cluster/DiskUsageTests.java | 151 +++++++++--------- 3 files changed, 199 insertions(+), 153 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java index 69e726d16d0c6..d03444a372e04 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java @@ -8,11 +8,16 @@ package org.elasticsearch.cluster; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -23,6 +28,9 @@ * Encapsulation class used to represent the amount of disk used on a node. */ public class DiskUsage implements ToXContentFragment, Writeable { + + private static final Logger logger = LogManager.getLogger(DiskUsage.class); + final String nodeId; final String nodeName; final String path; @@ -148,4 +156,112 @@ public String toString() { + Strings.format1Decimals(getFreeDiskAsPercentage(), "%") + "]"; } + + /** + * Finds the path with the least available disk space and returns its disk usage. 
It returns null if there is no + * file system data in the NodeStats or if the total bytes are a negative number. + */ + @Nullable + public static DiskUsage findLeastAvailablePath(NodeStats nodeStats) { + if (nodeStats.getFs() == null) { + logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId()); + return null; + } + + FsInfo.Path leastAvailablePath = null; + for (FsInfo.Path info : nodeStats.getFs()) { + if (leastAvailablePath == null) { + leastAvailablePath = info; + } else if (leastAvailablePath.getAvailable().getBytes() > info.getAvailable().getBytes()) { + leastAvailablePath = info; + } + } + if (leastAvailablePath == null) { + logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId()); + return null; + } + + final String nodeId = nodeStats.getNode().getId(); + final String nodeName = nodeStats.getNode().getName(); + if (logger.isTraceEnabled()) { + logger.trace( + "node [{}]: least available: total: {}, available: {}", + nodeId, + leastAvailablePath.getTotal(), + leastAvailablePath.getAvailable() + ); + } + if (leastAvailablePath.getTotal().getBytes() < 0) { + if (logger.isTraceEnabled()) { + logger.trace( + "node: [{}] least available path has less than 0 total bytes of disk [{}]", + nodeId, + leastAvailablePath.getTotal().getBytes() + ); + } + return null; + } else { + return new DiskUsage( + nodeId, + nodeName, + leastAvailablePath.getPath(), + leastAvailablePath.getTotal().getBytes(), + leastAvailablePath.getAvailable().getBytes() + ); + } + } + + /** + * Finds the path with the most available disk space and returns its disk usage. It returns null if there are no + * file system data in the node stats or if the total bytes are a negative number. 
+ */ + @Nullable + public static DiskUsage findMostAvailable(NodeStats nodeStats) { + if (nodeStats.getFs() == null) { + logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId()); + return null; + } + + FsInfo.Path mostAvailablePath = null; + for (FsInfo.Path info : nodeStats.getFs()) { + if (mostAvailablePath == null) { + mostAvailablePath = info; + } else if (mostAvailablePath.getAvailable().getBytes() < info.getAvailable().getBytes()) { + mostAvailablePath = info; + } + } + if (mostAvailablePath == null) { + logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId()); + return null; + } + + final String nodeId = nodeStats.getNode().getId(); + final String nodeName = nodeStats.getNode().getName(); + if (logger.isTraceEnabled()) { + logger.trace( + "node [{}]: most available: total: {}, available: {}", + nodeId, + mostAvailablePath.getTotal(), + mostAvailablePath.getAvailable() + ); + } + if (mostAvailablePath.getTotal().getBytes() < 0) { + if (logger.isTraceEnabled()) { + logger.trace( + "node: [{}] most available path has less than 0 total bytes of disk [{}]", + nodeId, + mostAvailablePath.getTotal().getBytes() + ); + } + return null; + } else { + return new DiskUsage( + nodeId, + nodeName, + mostAvailablePath.getPath(), + mostAvailablePath.getTotal().getBytes(), + mostAvailablePath.getAvailable().getBytes() + ); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index a335220ccfb25..71129b0ba0b37 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -35,7 +35,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -449,90 +448,20 @@ static void buildShardLevelInfo( } } - static void fillDiskUsagePerNode( + private static void fillDiskUsagePerNode( List nodeStatsArray, Map newLeastAvailableUsages, Map newMostAvailableUsages ) { for (NodeStats nodeStats : nodeStatsArray) { - if (nodeStats.getFs() == null) { - logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId()); - continue; + DiskUsage leastAvailableUsage = DiskUsage.findLeastAvailablePath(nodeStats); + if (leastAvailableUsage != null) { + newLeastAvailableUsages.put(nodeStats.getNode().getId(), leastAvailableUsage); } - - FsInfo.Path leastAvailablePath = null; - FsInfo.Path mostAvailablePath = null; - for (FsInfo.Path info : nodeStats.getFs()) { - if (leastAvailablePath == null) { - // noinspection ConstantConditions this assertion is for the benefit of readers, it's always true - assert mostAvailablePath == null; - mostAvailablePath = leastAvailablePath = info; - } else if (leastAvailablePath.getAvailable().getBytes() > info.getAvailable().getBytes()) { - leastAvailablePath = info; - } else if (mostAvailablePath.getAvailable().getBytes() < info.getAvailable().getBytes()) { - mostAvailablePath = info; - } - } - if (leastAvailablePath == null) { - // noinspection ConstantConditions this assertion is for the benefit of readers, it's always true - assert mostAvailablePath == null; - 
logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId()); - continue; + DiskUsage mostAvailableUsage = DiskUsage.findMostAvailable(nodeStats); + if (mostAvailableUsage != null) { + newMostAvailableUsages.put(nodeStats.getNode().getId(), mostAvailableUsage); } - - final String nodeId = nodeStats.getNode().getId(); - final String nodeName = nodeStats.getNode().getName(); - if (logger.isTraceEnabled()) { - logger.trace( - "node [{}]: most available: total: {}, available: {} / least available: total: {}, available: {}", - nodeId, - mostAvailablePath.getTotal(), - mostAvailablePath.getAvailable(), - leastAvailablePath.getTotal(), - leastAvailablePath.getAvailable() - ); - } - if (leastAvailablePath.getTotal().getBytes() < 0) { - if (logger.isTraceEnabled()) { - logger.trace( - "node: [{}] least available path has less than 0 total bytes of disk [{}], skipping", - nodeId, - leastAvailablePath.getTotal().getBytes() - ); - } - } else { - newLeastAvailableUsages.put( - nodeId, - new DiskUsage( - nodeId, - nodeName, - leastAvailablePath.getPath(), - leastAvailablePath.getTotal().getBytes(), - leastAvailablePath.getAvailable().getBytes() - ) - ); - } - if (mostAvailablePath.getTotal().getBytes() < 0) { - if (logger.isTraceEnabled()) { - logger.trace( - "node: [{}] most available path has less than 0 total bytes of disk [{}], skipping", - nodeId, - mostAvailablePath.getTotal().getBytes() - ); - } - } else { - newMostAvailableUsages.put( - nodeId, - new DiskUsage( - nodeId, - nodeName, - mostAvailablePath.getPath(), - mostAvailablePath.getTotal().getBytes(), - mostAvailablePath.getAvailable().getBytes() - ) - ); - } - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 9d4d03592c7ef..69cd7de2f047d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -25,9 +25,7 @@ import org.elasticsearch.test.ESTestCase; import java.nio.file.Path; -import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; @@ -146,20 +144,13 @@ public void testFillShardLevelInfo() { assertEquals(test1Path.getParent().getParent().getParent().toAbsolutePath().toString(), routingToPath.get(test_1)); } - public void testFillDiskUsage() { - Map newLeastAvaiableUsages = new HashMap<>(); - Map newMostAvaiableUsages = new HashMap<>(); - FsInfo.Path[] node1FSInfo = new FsInfo.Path[] { - new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), - new FsInfo.Path("/least", "/dev/sdb", 200, 190, 70), - new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280), }; - FsInfo.Path[] node2FSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", 100, 90, 80), }; - - FsInfo.Path[] node3FSInfo = new FsInfo.Path[] { - new FsInfo.Path("/least", "/dev/sda", 100, 90, 70), - new FsInfo.Path("/most", "/dev/sda", 100, 90, 80), }; - List nodeStats = Arrays.asList( - new NodeStats( + public void testLeastAndMostAvailableDiskSpace() { + { + FsInfo.Path[] nodeFSInfo = new FsInfo.Path[] { + new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), + new FsInfo.Path("/least", "/dev/sdb", 200, 190, 70), + new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280), }; + NodeStats nodeStats = new NodeStats( new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null, @@ 
-167,7 +158,7 @@ public void testFillDiskUsage() { null, null, null, - new FsInfo(0, null, node1FSInfo), + new FsInfo(0, null, nodeFSInfo), null, null, null, @@ -177,8 +168,16 @@ public void testFillDiskUsage() { null, null, null - ), - new NodeStats( + ); + DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); + DiskUsage mostNode = DiskUsage.findMostAvailable(nodeStats); + assertDiskUsage(mostNode, nodeFSInfo[2]); + assertDiskUsage(leastNode, nodeFSInfo[1]); + } + + { + FsInfo.Path[] nodeFSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", 100, 90, 80), }; + NodeStats nodeStats = new NodeStats( new DiscoveryNode("node_2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null, @@ -186,7 +185,7 @@ public void testFillDiskUsage() { null, null, null, - new FsInfo(0, null, node2FSInfo), + new FsInfo(0, null, nodeFSInfo), null, null, null, @@ -196,8 +195,18 @@ public void testFillDiskUsage() { null, null, null - ), - new NodeStats( + ); + DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); + DiskUsage mostNode = DiskUsage.findMostAvailable(nodeStats); + assertDiskUsage(leastNode, nodeFSInfo[0]); + assertDiskUsage(mostNode, nodeFSInfo[0]); + } + + { + FsInfo.Path[] nodeFSInfo = new FsInfo.Path[] { + new FsInfo.Path("/least", "/dev/sda", 100, 90, 70), + new FsInfo.Path("/most", "/dev/sda", 100, 90, 80), }; + NodeStats nodeStats = new NodeStats( new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null, @@ -205,7 +214,7 @@ public void testFillDiskUsage() { null, null, null, - new FsInfo(0, null, node3FSInfo), + new FsInfo(0, null, nodeFSInfo), null, null, null, @@ -215,39 +224,22 @@ public void testFillDiskUsage() { null, null, null - ) - ); - InternalClusterInfoService.fillDiskUsagePerNode(nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages); - DiskUsage leastNode_1 = newLeastAvaiableUsages.get("node_1"); - DiskUsage mostNode_1 = newMostAvaiableUsages.get("node_1"); - assertDiskUsage(mostNode_1, node1FSInfo[2]); - assertDiskUsage(leastNode_1, node1FSInfo[1]); - - DiskUsage leastNode_2 = newLeastAvaiableUsages.get("node_2"); - DiskUsage mostNode_2 = newMostAvaiableUsages.get("node_2"); - assertDiskUsage(leastNode_2, node2FSInfo[0]); - assertDiskUsage(mostNode_2, node2FSInfo[0]); - - DiskUsage leastNode_3 = newLeastAvaiableUsages.get("node_3"); - DiskUsage mostNode_3 = newMostAvaiableUsages.get("node_3"); - assertDiskUsage(leastNode_3, node3FSInfo[0]); - assertDiskUsage(mostNode_3, node3FSInfo[1]); + ); + DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); + DiskUsage mostNode = DiskUsage.findMostAvailable(nodeStats); + assertDiskUsage(leastNode, nodeFSInfo[0]); + assertDiskUsage(mostNode, nodeFSInfo[1]); + } } - public void testFillDiskUsageSomeInvalidValues() { - Map newLeastAvailableUsages = new HashMap<>(); - Map newMostAvailableUsages = new HashMap<>(); - FsInfo.Path[] node1FSInfo = new FsInfo.Path[] { - new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), - new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1), - new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280), }; - FsInfo.Path[] node2FSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", -1, -1, -1), }; + public void testLeastAndMostAvailableDiskSpaceSomeInvalidValues() { + { + FsInfo.Path[] nodeFSInfo = new FsInfo.Path[] { + new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), + new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1), + new FsInfo.Path("/most", "/dev/sdc", 300, 290, 
280), }; - FsInfo.Path[] node3FSInfo = new FsInfo.Path[] { - new FsInfo.Path("/most", "/dev/sda", 100, 90, 70), - new FsInfo.Path("/least", "/dev/sda", 10, -1, 0), }; - List nodeStats = Arrays.asList( - new NodeStats( + NodeStats nodeStats = new NodeStats( new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null, @@ -255,7 +247,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, - new FsInfo(0, null, node1FSInfo), + new FsInfo(0, null, nodeFSInfo), null, null, null, @@ -265,8 +257,17 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null - ), - new NodeStats( + ); + DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); + DiskUsage mostNode = DiskUsage.findMostAvailable(nodeStats); + assertNull("node_1 should have been skipped", leastNode); + assertDiskUsage(mostNode, nodeFSInfo[2]); + + } + + { + FsInfo.Path[] nodeFSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", -1, -1, -1), }; + NodeStats nodeStats = new NodeStats( new DiscoveryNode("node_2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null, @@ -274,7 +275,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, - new FsInfo(0, null, node2FSInfo), + new FsInfo(0, null, nodeFSInfo), null, null, null, @@ -284,8 +285,18 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null - ), - new NodeStats( + ); + DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); + DiskUsage mostNode = DiskUsage.findMostAvailable(nodeStats); + assertNull("node_2 should have been skipped", leastNode); + assertNull("node_2 should have been skipped", mostNode); + } + + { + FsInfo.Path[] node3FSInfo = new FsInfo.Path[] { + new FsInfo.Path("/most", "/dev/sda", 100, 90, 70), + new FsInfo.Path("/least", "/dev/sda", 10, -1, 0), }; + NodeStats nodeStats = new NodeStats( new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null, @@ -303,23 +314,13 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null - ) - ); - InternalClusterInfoService.fillDiskUsagePerNode(nodeStats, newLeastAvailableUsages, newMostAvailableUsages); - DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1"); - DiskUsage mostNode_1 = newMostAvailableUsages.get("node_1"); - assertNull("node1 should have been skipped", leastNode_1); - assertDiskUsage(mostNode_1, node1FSInfo[2]); + ); - DiskUsage leastNode_2 = newLeastAvailableUsages.get("node_2"); - DiskUsage mostNode_2 = newMostAvailableUsages.get("node_2"); - assertNull("node2 should have been skipped", leastNode_2); - assertNull("node2 should have been skipped", mostNode_2); - - DiskUsage leastNode_3 = newLeastAvailableUsages.get("node_3"); - DiskUsage mostNode_3 = newMostAvailableUsages.get("node_3"); - assertDiskUsage(leastNode_3, node3FSInfo[1]); - assertDiskUsage(mostNode_3, node3FSInfo[0]); + DiskUsage leastNode = DiskUsage.findLeastAvailablePath(nodeStats); + DiskUsage mostNode = DiskUsage.findMostAvailable(nodeStats); + assertDiskUsage(leastNode, node3FSInfo[1]); + assertDiskUsage(mostNode, node3FSInfo[0]); + } } private void assertDiskUsage(DiskUsage usage, FsInfo.Path path) { From 352a688b041746a669879022b6b1934f8a011892 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 1 Aug 2022 13:42:34 -0500 Subject: [PATCH 040/265] Eliminating initial delay of CoordinationDiagnosticsService#beginPollingClusterFormationInfo for integration tests (#89001) --- 
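The pattern this change relies on can be sketched independently of Elasticsearch (the `PollingService` class and its members below are hypothetical; only `remoteRequestInitialDelay`, the 10-second default, and `TimeValue.ZERO` come from the actual diff): a scheduling delay that is non-final and package-visible keeps its production default, while a test simply overwrites it with zero before polling begins.

[source,java]
----
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class PollingService {
    // Deliberately non-final and package-visible, mirroring remoteRequestInitialDelay:
    // production code keeps the 10-second default, tests may shorten it.
    long initialDelayMillis = TimeUnit.SECONDS.toMillis(10);

    void beginPolling(Runnable task, ScheduledExecutorService scheduler) {
        scheduler.schedule(task, initialDelayMillis, TimeUnit.MILLISECONDS);
    }
}

class PollingServiceExample {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        PollingService service = new PollingService();
        service.initialDelayMillis = 0L; // what the integration test does via TimeValue.ZERO
        service.beginPolling(() -> System.out.println("polled without the 10s wait"), scheduler);
        Thread.sleep(100);
        scheduler.shutdown();
    }
}
----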
.../coordination/CoordinationDiagnosticsServiceIT.java | 2 ++ .../coordination/CoordinationDiagnosticsService.java | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java index 9f4d1fad8eef3..1cf1b19359cbf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.threadpool.Scheduler; @@ -69,6 +70,7 @@ public void testBlockClusterStateProcessingOnOneNode() throws Exception { diagnosticsOnBlockedNode.clusterFormationResponses = nodeToClusterFormationStateMap; diagnosticsOnBlockedNode.clusterFormationInfoTasks = cancellables; + diagnosticsOnBlockedNode.remoteRequestInitialDelay = TimeValue.ZERO; diagnosticsOnBlockedNode.beginPollingClusterFormationInfo( nodesWithoutBlockedNode, nodeToClusterFormationStateMap::put, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index 9e0b266697e69..3987550436ff0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -99,6 +99,13 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { // Non-private for testing volatile ConcurrentMap clusterFormationResponses = null; + /** + * This is the amount of time that we wait before scheduling a remote request to gather diagnostic information. It is not + * user-configurable, but is non-final so that integration tests don't have to waste 10 seconds. 
+ */ + // Non-private for testing + TimeValue remoteRequestInitialDelay = new TimeValue(10, TimeUnit.SECONDS); + private static final Logger logger = LogManager.getLogger(CoordinationDiagnosticsService.class); /** @@ -804,7 +811,7 @@ private Scheduler.Cancellable fetchClusterFormationInfo( connectionListener ); } - }, new TimeValue(10, TimeUnit.SECONDS), ThreadPool.Names.SAME); + }, remoteRequestInitialDelay, ThreadPool.Names.SAME); } // Non-private for testing From d5ea39b2e870db16a6cc8e680d3148e1837236ab Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 1 Aug 2022 19:59:50 +0100 Subject: [PATCH 041/265] Clean up network setting docs (#88929) Clean up network setting docs - Add types for all params - Remove mention of JDKs before 11 - Clarify some wording Co-authored-by: Stef Nestor --- docs/reference/modules/http.asciidoc | 128 ++++++++++++---------- docs/reference/modules/network.asciidoc | 78 ++++++------- docs/reference/modules/transport.asciidoc | 103 +++++++++-------- 3 files changed, 158 insertions(+), 151 deletions(-) diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 9c4925fb1298d..67818f91f20c8 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -6,7 +6,7 @@ independently of the <>. You can also configure both interfaces together using the <>. `http.host`:: -(<>) +(<>, string) Sets the address of this node for HTTP traffic. The node will bind to this address and will also use it as its HTTP publish address. Accepts an IP address, a hostname, or a <>. @@ -16,7 +16,7 @@ transport and HTTP interfaces. Defaults to the address given by `network.host`. `http.bind_host`:: -(<>) +(<>, string) The network address(es) to which the node should bind in order to listen for incoming HTTP connections. Accepts a list of IP addresses, hostnames, and <>. Defaults to the address given by @@ -26,7 +26,7 @@ binding, and you also require different binding configurations for the transport and HTTP interfaces. `http.publish_host`:: -(<>) +(<>, string) The network address for HTTP clients to contact the node using sniffing. Accepts an IP address, a hostname, or a <>. Defaults to the address given by `http.host` or @@ -36,27 +36,27 @@ and you also require different binding configurations for the transport and HTTP interfaces. `http.publish_port`:: -(<>) +(<>, integer) The port of the <>. Configure this setting only if you need the publish port to be different from `http.port`. Defaults to the port assigned via `http.port`. `http.max_content_length`:: -(<>) +(<>, <>) Maximum size of an HTTP request body. Defaults to `100mb`. `http.max_initial_line_length`:: -(<>) +(<>, <>) Maximum size of an HTTP URL. Defaults to `4kb`. `http.max_header_size`:: -(<>) +(<>, <>) Maximum size of allowed headers. Defaults to `16kb`. [[http-compression]] // tag::http-compression-tag[] `http.compression` {ess-icon}:: -(<>) +(<>, boolean) Support for compression when possible (with Accept-Encoding). If HTTPS is enabled, defaults to `false`. Otherwise, defaults to `true`. + Disabling compression for HTTPS mitigates potential security risks, such as a @@ -65,13 +65,13 @@ you must explicitly set `http.compression` to `true`. // end::http-compression-tag[] `http.compression_level`:: -(<>) +(<>, integer) Defines the compression level to use for HTTP responses. Valid values are in the range of 1 (minimum compression) and 9 (maximum compression). Defaults to `3`. 
[[http-cors-enabled]] // tag::http-cors-enabled-tag[] `http.cors.enabled` {ess-icon}:: -(<>) +(<>, boolean) Enable or disable cross-origin resource sharing, which determines whether a browser on another origin can execute requests against {es}. Set to `true` to enable {es} to process pre-flight {wikipedia}/Cross-origin_resource_sharing[CORS] requests. {es} will respond to those requests with the `Access-Control-Allow-Origin` header if the `Origin` sent in the request is permitted by the `http.cors.allow-origin` list. Set to `false` (the default) to make {es} ignore the `Origin` request header, effectively disabling CORS requests because {es} will never respond with the `Access-Control-Allow-Origin` response header. @@ -85,7 +85,7 @@ compromised. If CORS is not enabled on {es}, the only way for the client to know [[http-cors-allow-origin]] // tag::http-cors-allow-origin-tag[] `http.cors.allow-origin` {ess-icon}:: -(<>) +(<>, string) Which origins to allow. If you prepend and append a forward slash (`/`) to the value, this will be treated as a regular expression, allowing you to support HTTP and HTTPs. For example, using `/https?:\/\/localhost(:[0-9]+)?/` would return the request header appropriately in both cases. Defaults to no origins allowed. + IMPORTANT: A wildcard (`*`) is a valid value but is considered a security risk, as your {es} instance is open to cross origin requests from *anywhere*. @@ -95,28 +95,30 @@ IMPORTANT: A wildcard (`*`) is a valid value but is considered a security risk, [[http-cors-max-age]] // tag::http-cors-max-age-tag[] `http.cors.max-age` {ess-icon}:: -(<>) -Browsers send a "preflight" OPTIONS-request to determine CORS settings. `max-age` defines how long the result should be cached for. Defaults to `1728000` (20 days). +(<>, integer) +Browsers send a "preflight" OPTIONS-request to determine CORS settings. +`max-age` defines for how long, in seconds, the result should be cached. +Defaults to `1728000` (20 days). // end::http-cors-max-age-tag[] [[http-cors-allow-methods]] // tag::http-cors-allow-methods-tag[] `http.cors.allow-methods` {ess-icon}:: -(<>) +(<>, string) Which methods to allow. Defaults to `OPTIONS, HEAD, GET, POST, PUT, DELETE`. // end::http-cors-allow-methods-tag[] [[http-cors-allow-headers]] // tag::http-cors-allow-headers-tag[] `http.cors.allow-headers` {ess-icon}:: -(<>) +(<>, string) Which headers to allow. Defaults to `X-Requested-With, Content-Type, Content-Length`. // end::http-cors-allow-headers-tag[] [[http-cors-allow-credentials]] // tag::http-cors-allow-credentials-tag[] `http.cors.allow-credentials` {ess-icon}:: -(<>) +(<>, boolean) Whether the `Access-Control-Allow-Credentials` header should be returned. Defaults to `false`. + NOTE: This header is only returned when the setting is set to `true`. @@ -124,80 +126,86 @@ NOTE: This header is only returned when the setting is set to `true`. // end::http-cors-allow-credentials-tag[] `http.detailed_errors.enabled`:: -(<>) -If `true`, enables the output of detailed error messages and stack traces in the response output. Defaults to `true`. -+ -If `false`, use the `error_trace` parameter to <> and return detailed error messages. Otherwise, only a simple message will be returned. +(<>, boolean) +Configures whether detailed error reporting in HTTP responses is enabled. +Defaults to `true`, which means that HTTP requests that include the +<> will return a +detailed error message including a stack trace if they encounter an exception. 
+If set to `false`, requests with the `?error_trace` parameter are rejected. `http.pipelining.max_events`:: -(<>) +(<>, integer) The maximum number of events to be queued up in memory before an HTTP connection is closed, defaults to `10000`. `http.max_warning_header_count`:: -(<>) -The maximum number of warning headers in client HTTP responses. Defaults to `unbounded`. +(<>, integer) +The maximum number of warning headers in client HTTP responses. Defaults to +`-1` which means the number of warning headers is unlimited. `http.max_warning_header_size`:: -(<>) -The maximum total size of warning headers in client HTTP responses. Defaults to `unbounded`. - -`http.tcp.no_delay`:: -(<>) -Enable or disable the {wikipedia}/Nagle%27s_algorithm[TCP no delay] -setting. Defaults to `network.tcp.no_delay`. +(<>, <>) +The maximum total size of warning headers in client HTTP responses. Defaults to +`-1` which means the size of the warning headers is unlimited. `http.tcp.keep_alive`:: -(<>) -Configures the `SO_KEEPALIVE` option for this socket, which -determines whether it sends TCP keepalive probes. -Defaults to `network.tcp.keep_alive`. +(<>, boolean) +Configures the `SO_KEEPALIVE` option for this socket, which determines whether +it sends TCP keepalive probes. Defaults to `network.tcp.keep_alive`. `http.tcp.keep_idle`:: -(<>) Configures the `TCP_KEEPIDLE` option for this socket, which -determines the time in seconds that a connection must be idle before -starting to send TCP keepalive probes. Defaults to `network.tcp.keep_idle`, which -uses the system default. This value cannot exceed `300` seconds. Only applicable on -Linux and macOS, and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPIDLE` option for HTTP sockets, which determines the +time in seconds that a connection must be idle before starting to send TCP +keepalive probes. Defaults to `network.tcp.keep_idle`, which uses the system +default. This value cannot exceed `300` seconds. Only applicable on Linux and +macOS. `http.tcp.keep_interval`:: -(<>) Configures the `TCP_KEEPINTVL` option for this socket, -which determines the time in seconds between sending TCP keepalive probes. -Defaults to `network.tcp.keep_interval`, which uses the system default. -This value cannot exceed `300` seconds. Only applicable on Linux and macOS, and requires -Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPINTVL` option for HTTP sockets, which determines the +time in seconds between sending TCP keepalive probes. Defaults to +`network.tcp.keep_interval`, which uses the system default. This value cannot +exceed `300` seconds. Only applicable on Linux and macOS. `http.tcp.keep_count`:: -(<>) Configures the `TCP_KEEPCNT` option for this socket, which -determines the number of unacknowledged TCP keepalive probes that may be -sent on a connection before it is dropped. Defaults to `network.tcp.keep_count`, -which uses the system default. Only applicable on Linux and macOS, and -requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPCNT` option for HTTP sockets, which determines the +number of unacknowledged TCP keepalive probes that may be sent on a connection +before it is dropped. Defaults to `network.tcp.keep_count`, which uses the +system default. Only applicable on Linux and macOS. + +`http.tcp.no_delay`:: +(<>, boolean) +Configures the `TCP_NODELAY` option on HTTP sockets, which determines whether +{wikipedia}/Nagle%27s_algorithm[TCP no delay] is enabled. Defaults to `true`. 
`http.tcp.reuse_address`:: -(<>) -Should an address be reused or not. Defaults to `network.tcp.reuse_address`. +(<>, boolean) +Configures the `SO_REUSEADDR` option for HTTP sockets, which determines whether +the address can be reused or not. Defaults to `false` on Windows and `true` +otherwise. `http.tcp.send_buffer_size`:: -(<>) -The size of the TCP send buffer (specified with <>). -Defaults to `network.tcp.send_buffer_size`. +(<>, <>) +The size of the TCP send buffer for HTTP traffic. Defaults to +`network.tcp.send_buffer_size`. `http.tcp.receive_buffer_size`:: -(<>) -The size of the TCP receive buffer (specified with <>). -Defaults to `network.tcp.receive_buffer_size`. +(<>, <>) +The size of the TCP receive buffer for HTTP traffic. Defaults to +`network.tcp.receive_buffer_size`. `http.client_stats.enabled`:: -(<>) +(<>, boolean) Enable or disable collection of HTTP client stats. Defaults to `true`. `http.client_stats.closed_channels.max_count`:: -(<>) +(<>, integer) When `http.client_stats.enabled` is `true`, sets the maximum number of closed HTTP channels for which {es} reports statistics. Defaults to `10000`. `http.client_stats.closed_channels.max_age`:: -(<>) +(<>, <>) When `http.client_stats.enabled` is `true`, sets the maximum length of time after closing a HTTP channel that {es} will report that channel's statistics. Defaults to `5m`. diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index e1d6500e7749d..240decbcca4d9 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -38,7 +38,7 @@ proceeding. Most users will need to configure only the following network settings. `network.host`:: -(<>) +(<>, string) Sets the address of this node for both HTTP and transport traffic. The node will bind to this address and will also use it as its publish address. Accepts an IP address, a hostname, or a <>. @@ -46,7 +46,7 @@ an IP address, a hostname, or a <>. Defaults to `_local_`. `http.port`:: -(<>) +(<>, integer) The port to bind for HTTP client communication. Accepts a single value or a range. If a range is specified, the node will bind to the first available port in the range. @@ -54,7 +54,7 @@ in the range. Defaults to `9200-9300`. `transport.port`:: -(<>) +(<>, integer) The port to bind for communication between nodes. Accepts a single value or a range. If a range is specified, the node will bind to the first available port in the range. Set this setting to a single port, not a range, on every @@ -170,7 +170,7 @@ you should not use them if you can use the <> instead. `network.bind_host`:: -(<>) +(<>, string) The network address(es) to which the node should bind in order to listen for incoming connections. Accepts a list of IP addresses, hostnames, and <>. Defaults to the address given by @@ -178,7 +178,7 @@ incoming connections. Accepts a list of IP addresses, hostnames, and different addresses for publishing and binding. `network.publish_host`:: -(<>) +(<>, string) The network address that clients and other nodes can use to contact this node. Accepts an IP address, a hostname, or a <>. Defaults to the address given by `network.host`. Use this setting only @@ -199,53 +199,53 @@ each node is accessible at all possible publish addresses. Use the following settings to control the low-level parameters of the TCP connections used by the HTTP and transport interfaces. -`network.tcp.no_delay`:: -(<>) -Enable or disable the {wikipedia}/Nagle%27s_algorithm[TCP no delay] -setting. Defaults to `true`. 
- `network.tcp.keep_alive`:: -(<>) -Configures the `SO_KEEPALIVE` option for this socket, which -determines whether it sends TCP keepalive probes. +(<>, boolean) +Configures the `SO_KEEPALIVE` option for network sockets, which determines +whether each connection sends TCP keepalive probes. Defaults to `true`. `network.tcp.keep_idle`:: -(<>) -Configures the `TCP_KEEPIDLE` option for this socket, which -determines the time in seconds that a connection must be idle before -starting to send TCP keepalive probes. Defaults to `-1`, which uses -the system default. This value cannot exceed `300` seconds. Only applicable on Linux and macOS, -and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPIDLE` option for network sockets, which determines the +time in seconds that a connection must be idle before starting to send TCP +keepalive probes. Defaults to `-1`, which means to use the system default. This +value cannot exceed `300` seconds. Only applicable on Linux and macOS. `network.tcp.keep_interval`:: -(<>) -Configures the `TCP_KEEPINTVL` option for this socket, -which determines the time in seconds between sending TCP keepalive probes. -Defaults to `-1`, which uses the system default. This value cannot exceed `300` seconds. -Only applicable on Linux and macOS, and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPINTVL` option for network sockets, which determines the +time in seconds between sending TCP keepalive probes. Defaults to `-1`, which +means to use the system default. This value cannot exceed `300` seconds. Only +applicable on Linux and macOS. `network.tcp.keep_count`:: -(<>) -Configures the `TCP_KEEPCNT` option for this socket, which -determines the number of unacknowledged TCP keepalive probes that may be -sent on a connection before it is dropped. Defaults to `-1`, -which uses the system default. Only applicable on Linux and macOS, and requires -Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPCNT` option for network sockets, which determines the +number of unacknowledged TCP keepalive probes that may be sent on a connection +before it is dropped. Defaults to `-1`, which means to use the system default. +Only applicable on Linux and macOS. + +`network.tcp.no_delay`:: +(<>, boolean) +Configures the `TCP_NODELAY` option on network sockets, which determines +whether {wikipedia}/Nagle%27s_algorithm[TCP no delay] is enabled. Defaults to +`true`. `network.tcp.reuse_address`:: -(<>) -Should an address be reused or not. Defaults to `true` on non-windows -machines. +(<>, boolean) +Configures the `SO_REUSEADDR` option for network sockets, which determines +whether the address can be reused or not. Defaults to `false` on Windows and +`true` otherwise. `network.tcp.send_buffer_size`:: -(<>) -The size of the TCP send buffer (specified with <>). -By default not explicitly set. +(<>, <>) +Configures the size of the TCP send buffer for network sockets. Defaults to +`-1` which means to use the system default. `network.tcp.receive_buffer_size`:: -(<>) -The size of the TCP receive buffer (specified with <>). -By default not explicitly set. +(<>, <>) +Configures the size of the TCP receive buffer. Defaults to `-1` which means to +use the system default. include::http.asciidoc[] diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index d97c2d432fa92..3663422a36305 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -7,7 +7,7 @@ independently of the <>. 
Use the settings>> to configure both interfaces together. `transport.host`:: -(<>) +(<>, string) Sets the address of this node for transport traffic. The node will bind to this address and will also use it as its transport publish address. Accepts an IP address, a hostname, or a <>. @@ -17,7 +17,7 @@ transport and HTTP interfaces. Defaults to the address given by `network.host`. `transport.bind_host`:: -(<>) +(<>, string) The network address(es) to which the node should bind in order to listen for incoming transport connections. Accepts a list of IP addresses, hostnames, and <>. Defaults to the address given by @@ -27,7 +27,7 @@ binding, and you also require different binding configurations for the transport and HTTP interfaces. `transport.publish_host`:: -(<>) +(<>, string) The network address at which the node can be contacted by other nodes. Accepts an IP address, a hostname, or a <>. Defaults to the address given by `transport.host` or `network.publish_host`. @@ -36,19 +36,19 @@ different addresses for publishing and binding, and you also require different binding configurations for the transport and HTTP interfaces. `transport.publish_port`:: -(<>) +(<>, integer) The port of the <>. Set this parameter only if you need the publish port to be different from `transport.port`. Defaults to the port assigned via `transport.port`. `transport.connect_timeout`:: -(<>) +(<>, <>) The connect timeout for initiating a new connection (in time setting format). Defaults to `30s`. `transport.compress`:: -(<>) +(<>, string) Set to `true`, `indexing_data`, or `false` to configure transport compression between nodes. The option `true` will compress all data. The option `indexing_data` will compress only the raw index data sent between nodes during @@ -56,72 +56,71 @@ ingest, ccr following (excluding bootstrap), and operations based shard recovery (excluding transferring lucene files). Defaults to `indexing_data`. `transport.compression_scheme`:: -(<>) +(<>, string) Configures the compression scheme for `transport.compress`. The options are `deflate` or `lz4`. If `lz4` is configured and the remote node has not been upgraded to a version supporting `lz4`, the traffic will be sent uncompressed. Defaults to `lz4`. `transport.ping_schedule`:: -(<>) -Schedule a regular application-level ping message -to ensure that transport connections between nodes are kept alive. Defaults to -`5s` in the transport client and `-1` (disabled) elsewhere. It is preferable -to correctly configure TCP keep-alives instead of using this feature, because -TCP keep-alives apply to all kinds of long-lived connections and not just to -transport connections. - -`transport.tcp.no_delay`:: -(<>) -Enable or disable the {wikipedia}/Nagle%27s_algorithm[TCP no delay] -setting. Defaults to `network.tcp.no_delay`. +(<>, <>) +Schedule a regular application-level ping message to ensure that transport +connections between nodes are kept alive. Defaults to `5s` in the transport +client and `-1` (disabled) elsewhere. It is preferable to correctly configure +TCP keep-alives instead of using this feature, because TCP keep-alives apply to +all kinds of long-lived connections and not just to transport connections. `transport.tcp.keep_alive`:: -(<>) -Configures the `SO_KEEPALIVE` option for this socket, which -determines whether it sends TCP keepalive probes. -Defaults to `network.tcp.keep_alive`. +(<>, boolean) +Configures the `SO_KEEPALIVE` option for transport sockets, which determines +whether they send TCP keepalive probes. 
Defaults to `network.tcp.keep_alive`. `transport.tcp.keep_idle`:: -(<>) -Configures the `TCP_KEEPIDLE` option for this socket, which -determines the time in seconds that a connection must be idle before -starting to send TCP keepalive probes. Defaults to `network.tcp.keep_idle` if set, -or the system default otherwise. -This value cannot exceed `300` seconds. In cases where the system default -is higher than `300`, the value is automatically lowered to `300`. Only applicable on -Linux and macOS, and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPIDLE` option for transport sockets, which determines +the time in seconds that a connection must be idle before starting to send TCP +keepalive probes. Defaults to `network.tcp.keep_idle` if set, or the system +default otherwise. This value cannot exceed `300` seconds. In cases where the +system default is higher than `300`, the value is automatically lowered to +`300`. Only applicable on Linux and macOS. `transport.tcp.keep_interval`:: -(<>) -Configures the `TCP_KEEPINTVL` option for this socket, -which determines the time in seconds between sending TCP keepalive probes. -Defaults to `network.tcp.keep_interval` if set, or the system default otherwise. -This value cannot exceed `300` seconds. In cases where the system default is higher than `300`, -the value is automatically lowered to `300`. Only applicable on Linux and macOS, -and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPINTVL` option for transport sockets, which determines +the time in seconds between sending TCP keepalive probes. Defaults to +`network.tcp.keep_interval` if set, or the system default otherwise. This value +cannot exceed `300` seconds. In cases where the system default is higher than +`300`, the value is automatically lowered to `300`. Only applicable on Linux +and macOS. `transport.tcp.keep_count`:: -(<>) -Configures the `TCP_KEEPCNT` option for this socket, which -determines the number of unacknowledged TCP keepalive probes that may be -sent on a connection before it is dropped. Defaults to `network.tcp.keep_count` -if set, or the system default otherwise. Only applicable on Linux and macOS, and -requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPCNT` option for transport sockets, which determines the +number of unacknowledged TCP keepalive probes that may be sent on a connection +before it is dropped. Defaults to `network.tcp.keep_count` if set, or the +system default otherwise. Only applicable on Linux and macOS. + +`transport.tcp.no_delay`:: +(<>, boolean) +Configures the `TCP_NODELAY` option on transport sockets, which determines +whether {wikipedia}/Nagle%27s_algorithm[TCP no delay] is enabled. Defaults to +`true`. `transport.tcp.reuse_address`:: -(<>) -Should an address be reused or not. Defaults to `network.tcp.reuse_address`. +(<>, boolean) +Configures the `SO_REUSEADDR` option for network sockets, which determines +whether the address can be reused or not. Defaults to +`network.tcp.reuse_address`. `transport.tcp.send_buffer_size`:: -(<>) -The size of the TCP send buffer (specified with <>). -Defaults to `network.tcp.send_buffer_size`. +(<>, <>) +The size of the TCP send buffer for transport traffic. Defaults to +`network.tcp.send_buffer_size`. `transport.tcp.receive_buffer_size`:: -(<>) -The size of the TCP receive buffer (specified with <>). -Defaults to `network.tcp.receive_buffer_size`. +(<>, <>) +The size of the TCP receive buffer for transport traffic. Defaults to +`network.tcp.receive_buffer_size`. 
[[transport-profiles]] ===== Transport profiles From 2a03ac35a63463b00b1bdd0aa64e4aa9142935d2 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Mon, 1 Aug 2022 21:05:57 +0200 Subject: [PATCH 042/265] Fix compilation in the rescore plugin (#89004) Add source fallback operation when looking up a the factor field added in #88735 Resolves #88985 --- .../elasticsearch/example/rescore/ExampleRescoreBuilder.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java index 7109f6686451b..f599ddb63deff 100644 --- a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java +++ b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.rescore.Rescorer; import org.elasticsearch.search.rescore.RescorerBuilder; @@ -99,7 +100,8 @@ public static ExampleRescoreBuilder fromXContent(XContentParser parser) { @Override public RescoreContext innerBuildContext(int windowSize, SearchExecutionContext context) throws IOException { IndexFieldData factorFieldData = - this.factorField == null ? null : context.getForField(context.getFieldType(this.factorField)); + this.factorField == null ? null : context.getForField(context.getFieldType(this.factorField), + MappedFieldType.FielddataOperation.SEARCH); return new ExampleRescoreContext(windowSize, factor, factorFieldData); } From e94b4befc5f59db2c56eb6b042a735e77c77cd87 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 1 Aug 2022 15:36:42 -0400 Subject: [PATCH 043/265] [ML] mute tests for issue #89008 (#89009) related #89008 --- .../xpack/core/ml/inference/TrainedModelConfigTests.java | 2 ++ .../ml/inference/trainedmodel/QuestionAnsweringConfigTests.java | 2 ++ .../ml/inference/trainedmodel/TextSimilarityConfigTests.java | 2 ++ 3 files changed, 6 insertions(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java index 934173b02d426..90b37a67cf6f8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.inference; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesReference; @@ -59,6 +60,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89008") public class TrainedModelConfigTests extends AbstractBWCSerializationTestCase { private boolean lenient; diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java index 2ad335d3cf4b0..4f3b09259f8f9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; @@ -15,6 +16,7 @@ import java.io.IOException; import java.util.function.Predicate; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89008") public class QuestionAnsweringConfigTests extends InferenceConfigItemTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java index 3d2f4b21972f0..77dd5dcf38d61 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; @@ -16,6 +17,7 @@ import java.util.Arrays; import java.util.function.Predicate; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89008") public class TextSimilarityConfigTests extends InferenceConfigItemTestCase { @Override From a3555eca6baced1fb70ce5c2207188c31f38e76c Mon Sep 17 00:00:00 2001 From: Leaf-Lin <39002973+Leaf-Lin@users.noreply.github.com> Date: Tue, 2 Aug 2022 16:36:27 +1000 Subject: [PATCH 044/265] Add warning on restarting nodes > low watermark As per https://github.com/elastic/elasticsearch/issues/49972 and https://github.com/elastic/elasticsearch/issues/56578, if a node is above low disk threshold when being restarted (rolling restart, network disruption or crash), the disk threshold decider prevents reusing the shard content on the restarted node. The consequence of the event is the node may take a long time to start. --- docs/reference/setup/restart-cluster.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc index 6b37eba826f2a..80c99afe18f13 100644 --- a/docs/reference/setup/restart-cluster.asciidoc +++ b/docs/reference/setup/restart-cluster.asciidoc @@ -8,6 +8,9 @@ nodes in the cluster while in the case of <>, you shut down only one node at a time, so the service remains uninterrupted. +[WARNING] +Nodes exceed low watermark will be slow to restart. You may want to reduce disk +usage below low watermark before proceeding to restart nodes. 
[discrete] [[restart-cluster-full]] From 00eefdd9a018015d511356b14c9d50a1698e88e3 Mon Sep 17 00:00:00 2001 From: Leaf-Lin Date: Tue, 2 Aug 2022 16:44:14 +1000 Subject: [PATCH 045/265] Revert "Add warning on restarting nodes > low watermark" This reverts commit a3555eca6baced1fb70ce5c2207188c31f38e76c. --- docs/reference/setup/restart-cluster.asciidoc | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc index 80c99afe18f13..6b37eba826f2a 100644 --- a/docs/reference/setup/restart-cluster.asciidoc +++ b/docs/reference/setup/restart-cluster.asciidoc @@ -8,9 +8,6 @@ nodes in the cluster while in the case of <>, you shut down only one node at a time, so the service remains uninterrupted. -[WARNING] -Nodes exceed low watermark will be slow to restart. You may want to reduce disk -usage below low watermark before proceeding to restart nodes. [discrete] [[restart-cluster-full]] From d0b8caebb9af1821ca3faab495c0baa62b39fa98 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 2 Aug 2022 09:11:29 +0200 Subject: [PATCH 046/265] Reject unknown request body fields in Mount API (#88987) The parser used to parse Mount API requests is configured to ignore unknown fields. I suspect we made it this way when it was created because we were expecting to change the request's body in the future, but that never happened. This leniency confuses users (#75982) so we think it is better to simply reject requests with unknown fields starting v8.5.0. Because the High Level REST Client has a bug (to be fixed in #79604) that injects a wrong ignored_index_settings we decided to just ignore and not reject that one on purpose. Closes #75982 --- docs/changelog/88987.yaml | 6 + .../MountSearchableSnapshotRequest.java | 15 +- .../resources/rest-api-spec/test/mount.yml | 175 ++++++++++++++++++ 3 files changed, 195 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/88987.yaml create mode 100644 x-pack/plugin/searchable-snapshots/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/mount.yml diff --git a/docs/changelog/88987.yaml b/docs/changelog/88987.yaml new file mode 100644 index 0000000000000..d9b3a8c5729b9 --- /dev/null +++ b/docs/changelog/88987.yaml @@ -0,0 +1,6 @@ +pr: 88987 +summary: Reject unknown request body fields in Mount API +area: Snapshot/Restore +type: bug +issues: + - 75982 diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java index 88bad8d1e20db..02657c384859e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java @@ -38,7 +38,7 @@ public class MountSearchableSnapshotRequest extends MasterNodeRequest PARSER = new ConstructingObjectParser<>( "mount_searchable_snapshot", - true, + false, (a, request) -> new MountSearchableSnapshotRequest( Objects.requireNonNullElse((String) a[1], (String) a[0]), Objects.requireNonNull(request.param("repository")), @@ -56,6 +56,15 @@ public class MountSearchableSnapshotRequest extends MasterNodeRequest { + p.skipChildren(); + return Strings.EMPTY_ARRAY; + }, IGNORED_INDEX_SETTINGS_FIELD, ObjectParser.ValueType.STRING_ARRAY); } /** diff --git 
a/x-pack/plugin/searchable-snapshots/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/mount.yml b/x-pack/plugin/searchable-snapshots/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/mount.yml new file mode 100644 index 0000000000000..454f6bb19c16b --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/mount.yml @@ -0,0 +1,175 @@ +--- +setup: + + - do: + indices.create: + index: docs + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + refresh_interval: "123s" + + - do: + bulk: + body: + - index: + _index: docs + _id: "1" + - field: foo + - index: + _index: docs + _id: "2" + - field: bar + - index: + _index: docs + _id: "3" + - field: baz + - index: + _index: docs + _id: "4" + - field: bar + - index: + _index: docs + _id: "5" + - field: baz + - index: + _index: docs + _id: "6" + - field: baz + + - do: + snapshot.create_repository: + repository: repository-fs + body: + type: fs + settings: + location: "repository-fs" + + # Remove the snapshot if a previous test failed to delete it. + # Useful for third party tests that runs the test against a real external service. + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot + ignore: 404 + + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + + - do: + indices.delete: + index: docs + +--- +teardown: + + - do: + indices.delete: + index: docs-* + +--- +"Test Mount API with ignore_index_settings": + - do: + searchable_snapshots.mount: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + body: + index: docs + renamed_index: docs-mounted + ignore_index_settings: ["index.refresh_interval"] + + - match: { snapshot.snapshot: snapshot } + - match: { snapshot.shards.failed: 0 } + - match: { snapshot.shards.successful: 1 } + + - do: + indices.get_settings: + include_defaults: true + flat_settings: true + index: docs-mounted + + - match: { docs-mounted.defaults.index\.refresh_interval: "1s" } + + - do: + search: + index: docs-mounted + body: + query: + match_all: {} + + - match: { hits.total.value: 6 } + + - do: + search: + index: docs-mounted + body: + size: 0 + query: + term: + field: bar + + - match: { hits.total.value: 2 } + +--- +"Test Mount API silently ignored special field ignored_index_settings in request body": + - do: + searchable_snapshots.mount: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + body: + index: docs + renamed_index: docs-with-ignored-settings + ignored_index_settings: ["index.refresh_interval"] + + - match: { snapshot.snapshot: snapshot } + - match: { snapshot.shards.failed: 0 } + - match: { snapshot.shards.successful: 1 } + + - do: + indices.get_settings: + index: docs-with-ignored-settings + + - match: { docs-with-ignored-settings.settings.index.refresh_interval: "123s" } + + - do: + search: + index: docs-with-ignored-settings + body: + query: + match_all: {} + + - match: { hits.total.value: 6 } + + - do: + search: + index: docs-with-ignored-settings + body: + size: 0 + query: + term: + field: bar + + - match: { hits.total.value: 2 } + + +--- +"Test Mount API with unknown request body field": + - skip: + version: " - 8.4.99" + reason: "unknown request body fields are rejected starting version 8.5.0" + - do: + catch: bad_request + searchable_snapshots.mount: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + body: + index: docs + renamed_index: docs-with-wrong-request-body + wrong_request_body: 
"This is an unknown field" From e4214efe6df897ae649c88ed1dfab417d57cf045 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 2 Aug 2022 10:03:31 +0200 Subject: [PATCH 047/265] Make it explicit that test expects no rebalancing. (#88993) This is required in case new shards allocator might be more proactive with rebalancing. --- .../indices/recovery/IndexRecoveryIT.java | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index d5385eee26469..510011e183e80 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -54,6 +54,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.search.Queries; @@ -120,6 +121,7 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -360,6 +362,15 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio logger.info("--> start node A"); final String nodeA = internalCluster().startNode(); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) + ) + ); + logger.info("--> create index on node: {}", nodeA); createIndex( INDEX_NAME, From bc840f95bc66cc56f917c54e7c53341d1c524393 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 2 Aug 2022 10:00:35 +0100 Subject: [PATCH 048/265] Wrap enrich execute action in new tracing context (#89021) Part of #84369. Split out from #88443. This PR wraps parts logic in `InternalExecutePolicyAction` in a new tracing context. This is necessary so that a tracing implementation can use the thread context to propagate tracing headers, but without the code attempting to set the same key twice in the thread context, which is illegal. 
--- .../action/InternalExecutePolicyAction.java | 104 ++++++++++-------- 1 file changed, 56 insertions(+), 48 deletions(-) diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/InternalExecutePolicyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/InternalExecutePolicyAction.java index 480acc185f016..5eee0cc296573 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/InternalExecutePolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/InternalExecutePolicyAction.java @@ -88,56 +88,64 @@ protected void doExecute(Task transportTask, Request request, ActionListener headers) { - String description = "executing enrich policy [" + request.getName() + "]"; - return new ExecuteEnrichPolicyTask(id, type, action, description, parentTaskId, headers); - } - }); - - try { - ActionListener listener; - if (request.isWaitForCompletion()) { - listener = ActionListener.wrap(result -> actionListener.onResponse(new Response(result)), actionListener::onFailure); - } else { - listener = ActionListener.wrap(result -> LOGGER.debug("successfully executed policy [{}]", request.getName()), e -> { - if (e instanceof TaskCancelledException) { - LOGGER.info(e.getMessage()); - } else { - LOGGER.error("failed to execute policy [" + request.getName() + "]", e); - } - }); - } - policyExecutor.runPolicyLocally(task, request.getName(), ActionListener.wrap(result -> { + try (var ignored = transportService.getThreadPool().getThreadContext().newTraceContext()) { + // Can't use provided task, because in the case wait_for_completion=false then + // as soon as actionListener#onResponse is invoked then the provided task get unregistered and + // then there no way to see the policy execution in the list tasks or get task APIs. 
+ var task = (ExecuteEnrichPolicyTask) taskManager.register("enrich", TASK_ACTION, new TaskAwareRequest() { + + @Override + public void setParentTask(TaskId taskId) { + request.setParentTask(taskId); + } + + @Override + public TaskId getParentTask() { + return request.getParentTask(); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + String description = "executing enrich policy [" + request.getName() + "]"; + return new ExecuteEnrichPolicyTask(id, type, action, description, parentTaskId, headers); + } + }); + + try { + ActionListener listener; + if (request.isWaitForCompletion()) { + listener = ActionListener.wrap( + result -> actionListener.onResponse(new Response(result)), + actionListener::onFailure + ); + } else { + listener = ActionListener.wrap( + result -> LOGGER.debug("successfully executed policy [{}]", request.getName()), + e -> { + if (e instanceof TaskCancelledException) { + LOGGER.info(e.getMessage()); + } else { + LOGGER.error("failed to execute policy [" + request.getName() + "]", e); + } + } + ); + } + policyExecutor.runPolicyLocally(task, request.getName(), ActionListener.wrap(result -> { + taskManager.unregister(task); + listener.onResponse(result); + }, e -> { + taskManager.unregister(task); + listener.onFailure(e); + })); + + if (request.isWaitForCompletion() == false) { + TaskId taskId = new TaskId(clusterState.nodes().getLocalNodeId(), task.getId()); + actionListener.onResponse(new Response(taskId)); + } + } catch (Exception e) { taskManager.unregister(task); - listener.onResponse(result); - }, e -> { - taskManager.unregister(task); - listener.onFailure(e); - })); - - if (request.isWaitForCompletion() == false) { - TaskId taskId = new TaskId(clusterState.nodes().getLocalNodeId(), task.getId()); - actionListener.onResponse(new Response(taskId)); + throw e; } - } catch (Exception e) { - taskManager.unregister(task); - throw e; } } From 9bed4b89fdea12463f660aee913b74d42b76afaa Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 2 Aug 2022 11:02:00 +0200 Subject: [PATCH 049/265] Preemptively compute RoutingNodes and the indices lookup during publication (#89005) Computing routing nodes and the indices lookup takes considerable time for large states. Both are needed during cluster state application and Prior to this change would be computed on the applier thread in all cases. By running the creation of both objects concurrently to publication, the many shards benchmark sees a 10%+ reduction in the bootstrap time to 50k indices. 
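The reason it is safe to trigger these computations from another thread is that both getters memoize lazily behind a volatile read with a synchronized build step, as the diff below shows. Here is a generic sketch of that idiom using a hypothetical LazyValue class rather than the actual ClusterState or Metadata code:

import java.util.function.Supplier;

final class LazyValue<T> {
    private final Supplier<T> builder;
    private volatile T value;

    LazyValue(Supplier<T> builder) {
        this.builder = builder;
    }

    T get() {
        T v = value;                 // volatile read: fast path once the value is built
        return v != null ? v : buildValue();
    }

    private synchronized T buildValue() {
        T v = value;                 // re-check under the lock so the value is built at most once
        if (v == null) {
            v = builder.get();
            value = v;               // volatile write publishes the fully built object
        }
        return v;
    }
}

Whichever thread calls get() first, whether the warm-up task on the generic pool or the applier thread, performs the build; the other thread either sees the published volatile field or waits briefly on the monitor, so the work is done at most once and the applier thread usually finds the value already computed.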
--- docs/changelog/89005.yaml | 5 +++++ .../elasticsearch/cluster/ClusterState.java | 19 +++++++++++++++---- .../cluster/metadata/Metadata.java | 19 +++++++++++++++---- .../cluster/service/MasterService.java | 4 ++++ .../FakeThreadPoolMasterServiceTests.java | 9 +++++++++ 5 files changed, 48 insertions(+), 8 deletions(-) create mode 100644 docs/changelog/89005.yaml diff --git a/docs/changelog/89005.yaml b/docs/changelog/89005.yaml new file mode 100644 index 0000000000000..374e254798612 --- /dev/null +++ b/docs/changelog/89005.yaml @@ -0,0 +1,5 @@ +pr: 89005 +summary: Preemptively compute `RoutingNodes` and the indices lookup during publication +area: Cluster Coordination +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index e79865a5a8e45..d64df93812ac0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -319,11 +319,22 @@ public Set getVotingConfigExclusions() { * Returns a built (on demand) routing nodes view of the routing table. */ public RoutingNodes getRoutingNodes() { - if (routingNodes != null) { - return routingNodes; + RoutingNodes r = routingNodes; + if (r != null) { + return r; } - routingNodes = RoutingNodes.immutable(routingTable, nodes); - return routingNodes; + r = buildRoutingNodes(); + return r; + } + + private synchronized RoutingNodes buildRoutingNodes() { + RoutingNodes r = routingNodes; + if (r != null) { + return r; + } + r = RoutingNodes.immutable(routingTable, nodes); + routingNodes = r; + return r; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 3ea7056e0ef99..38d39d64cab4b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -222,7 +222,7 @@ default boolean isRestorable() { private final String[] allClosedIndices; private final String[] visibleClosedIndices; - private SortedMap indicesLookup; + private volatile SortedMap indicesLookup; private final Map mappingsByHash; private final Version oldestIndexVersion; @@ -510,10 +510,21 @@ public boolean equalsAliases(Metadata other) { } public SortedMap getIndicesLookup() { - if (indicesLookup == null) { - indicesLookup = Builder.buildIndicesLookup(custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY), indices); + SortedMap lookup = indicesLookup; + if (lookup == null) { + lookup = buildIndicesLookup(); } - return indicesLookup; + return lookup; + } + + private synchronized SortedMap buildIndicesLookup() { + SortedMap i = indicesLookup; + if (i != null) { + return i; + } + i = Builder.buildIndicesLookup(custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY), indices); + indicesLookup = i; + return i; } public boolean sameIndicesLookup(Metadata other) { diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 52f3f86ea4f44..906348374c182 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -330,6 +330,10 @@ public String getDescription() { } logger.debug("publishing cluster state version [{}]", newClusterState.version()); + // initialize 
routing nodes and the indices lookup concurrently, we will need both of them for the cluster state + // application and can compute them while we wait for the other nodes during publication + threadPool.generic().execute(newClusterState::getRoutingNodes); + threadPool.generic().execute(newClusterState.metadata()::getIndicesLookup); publish( clusterStatePublicationEvent, new CompositeTaskAckListener( diff --git a/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java b/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java index 822a6753c1df4..72d80438e05bc 100644 --- a/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java @@ -100,6 +100,10 @@ public void onFailure(Exception e) { assertThat(scheduleTask, hasToString("master service scheduling next task")); scheduleTask.run(); + // run tasks for computing routing nodes and indices lookup + runnableTasks.remove(0).run(); + runnableTasks.remove(0).run(); + final Runnable publishTask = runnableTasks.remove(0); assertThat(publishTask, hasToString(containsString("publish change of cluster state"))); publishTask.run(); @@ -137,6 +141,11 @@ public void onFailure(Exception e) { assertThat(runnableTasks.size(), equalTo(1)); // check that new task gets queued runnableTasks.remove(0).run(); // schedule again + + // run tasks for computing routing nodes and indices lookup + runnableTasks.remove(0).run(); + runnableTasks.remove(0).run(); + runnableTasks.remove(0).run(); // publish again assertThat(lastClusterStateRef.get().metadata().indices().size(), equalTo(2)); assertThat(lastClusterStateRef.get().version(), equalTo(firstClusterStateVersion + 2)); From 44c8d19b6ddb77c574a014ce5b521c9a62513ec9 Mon Sep 17 00:00:00 2001 From: Leaf-Lin <39002973+Leaf-Lin@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:24:31 +1000 Subject: [PATCH 050/265] Update snapshots.asciidoc (#87584) Adding a typo ``` in the doc --- docs/reference/cat/snapshots.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 6a2bbf040d493..6c400e235cb2d 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -103,7 +103,7 @@ units>>. `total_shards`, `ts`:: (Default) Total number of shards in the snapshot. -`reason, `r`:: +`reason`, `r`:: Reason for any snapshot failures. -- From d01dd395bbf242bae297ff73e7b3fb9935c21aca Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 2 Aug 2022 11:27:05 +0200 Subject: [PATCH 051/265] Audit log bulk update of API keys (#88942) This PR adds a new audit trail event for when API keys are updated in bulk. 
Relates: #88758 --- x-pack/docs/en/rest-api/security.asciidoc | 2 + .../security/bulk-update-api-keys.asciidoc | 5 +++ .../en/security/auditing/event-types.asciidoc | 36 +++++++++++++++- .../audit/logfile/LoggingAuditTrail.java | 41 ++++++++++++++++--- .../audit/logfile/LoggingAuditTrailTests.java | 35 ++++++++++++++++ 5 files changed, 111 insertions(+), 8 deletions(-) create mode 100644 x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index ba3cf88dac46b..a47f03cca1d2d 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -71,6 +71,7 @@ without requiring basic authentication: * <> * <> * <> +* <> [discrete] [[security-user-apis]] @@ -190,6 +191,7 @@ include::security/oidc-authenticate-api.asciidoc[] include::security/oidc-logout-api.asciidoc[] include::security/query-api-key.asciidoc[] include::security/update-api-key.asciidoc[] +include::security/bulk-update-api-keys.asciidoc[] include::security/saml-prepare-authentication-api.asciidoc[] include::security/saml-authenticate-api.asciidoc[] include::security/saml-logout-api.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc new file mode 100644 index 0000000000000..aaef85677f0ab --- /dev/null +++ b/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc @@ -0,0 +1,5 @@ +[role="xpack"] +[[security-api-bulk-update-api-keys]] +=== Bulk update API keys API + +coming::[8.5.0] diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index b65c0fb3da31b..26387d24b54e4 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -258,6 +258,32 @@ event action. "tags":["dev","staging"]}}}}} ==== +[[event-change-apikeys]] +`change_apikeys`:: +Logged when the <> API is +invoked to update the attributes of multiple existing API keys. ++ +You must include the `security_config_change` event type to audit the related +event action. ++ +.Example +[%collapsible%open] +==== +[source,js] +{"type":"audit","timestamp":"2020-12-31T00:33:52,521+0200","node.id": +"9clhpgjJRR-iKzOw20xBNQ","event.type":"security_config_change", +"event.action":"change_apikeys","request.id":"9FteCmovTzWHVI-9Gpa_vQ", +"change":{"apikeys": +{"ids":["zcwN3YEBBmnjw-K-hW5_","j7c0WYIBqecB5CbVR6Oq"],"role_descriptors": +[{"cluster":["monitor","manage_ilm"],"indices":[{"names":["index-a*"],"privileges": +["read","maintenance"]},{"names":["in*","alias*"],"privileges":["read"], +"field_security":{"grant":["field1*","@timestamp"],"except":["field11"]}}], +"applications":[],"run_as":[]},{"cluster":["all"],"indices":[{"names": +["index-b*"],"privileges":["all"]}],"applications":[],"run_as":[]}], +"metadata":{"application":"my-application","environment":{"level":1, +"tags":["dev","staging"]}}}}} +==== + [[event-delete-privileges]] `delete_privileges`:: Logged when the @@ -563,7 +589,7 @@ the `event.action` attribute takes one of the following values: `put_user`, `change_password`, `put_role`, `put_role_mapping`, `change_enable_user`, `change_disable_user`, `put_privileges`, `create_apikey`, `delete_user`, `delete_role`, `delete_role_mapping`, `invalidate_apikeys`, -`delete_privileges`, or `change_apikey`. +`delete_privileges`, `change_apikey`, or `change_apikeys`. 
`request.id` :: A synthetic identifier that can be used to correlate the events associated with a particular REST request. @@ -653,7 +679,8 @@ ones): The events with the `event.type` attribute equal to `security_config_change` have one of the following `event.action` attribute values: `put_user`, `change_password`, `put_role`, `put_role_mapping`, `change_enable_user`, `change_disable_user`, `put_privileges`, `create_apikey`, `delete_user`, -`delete_role`, `delete_role_mapping`, `invalidate_apikeys`, `delete_privileges`, or `change_apikey`. +`delete_role`, `delete_role_mapping`, `invalidate_apikeys`, `delete_privileges`, `change_apikey`, +or `change_apikeys`. These events also have *one* of the following extra attributes (in addition to the common ones), which is specific to the `event.type` attribute. The attribute's value is a nested JSON object: @@ -789,6 +816,11 @@ a `name` or `expiration`. , "user":{"name": , "realm": }}` ---- // NOTCONSOLE ++ +The object for a bulk API key update will differ in that it will not +include `name`, `owned_by_authenticated_user`, or `user`. Instead, it +may include `metadata` and `role_descriptors`, which have the same +schemas as the fields in the `apikey` config object above. `service_token` :: An object like: + diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 503d5becab553..1d4a08ccf82a7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -43,6 +43,9 @@ import org.elasticsearch.xcontent.json.JsonStringEncoder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.action.Grant; +import org.elasticsearch.xpack.core.security.action.apikey.BaseUpdateApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -290,7 +293,8 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { ActivateProfileAction.NAME, UpdateProfileDataAction.NAME, SetProfileEnabledAction.NAME, - UpdateApiKeyAction.NAME + UpdateApiKeyAction.NAME, + BulkUpdateApiKeyAction.NAME ); private static final String FILTER_POLICY_PREFIX = setting("audit.logfile.events.ignore_filters."); // because of the default wildcard value (*) for the field filter, a policy with @@ -753,6 +757,9 @@ public void accessGranted( } else if (msg instanceof final UpdateApiKeyRequest updateApiKeyRequest) { assert UpdateApiKeyAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody(updateApiKeyRequest).build(); + } else if (msg instanceof final BulkUpdateApiKeyRequest bulkUpdateApiKeyRequest) { + assert BulkUpdateApiKeyAction.NAME.equals(action); + securityChangeLogEntryBuilder(requestId).withRequestBody(bulkUpdateApiKeyRequest).build(); } else { throw new IllegalStateException( "Unknown message class type [" @@ -1231,6 +1238,16 @@ LogEntryBuilder withRequestBody(final UpdateApiKeyRequest 
updateApiKeyRequest) t return this; } + LogEntryBuilder withRequestBody(final BulkUpdateApiKeyRequest bulkUpdateApiKeyRequest) throws IOException { + logEntry.with(EVENT_ACTION_FIELD_NAME, "change_apikeys"); + XContentBuilder builder = JsonXContent.contentBuilder().humanReadable(true); + builder.startObject(); + withRequestBody(builder, bulkUpdateApiKeyRequest); + builder.endObject(); + logEntry.with(CHANGE_CONFIG_FIELD_NAME, Strings.toString(builder)); + return this; + } + private void withRequestBody(XContentBuilder builder, CreateApiKeyRequest createApiKeyRequest) throws IOException { TimeValue expiration = createApiKeyRequest.getExpiration(); builder.startObject("apikey") @@ -1250,19 +1267,31 @@ private void withRequestBody(XContentBuilder builder, CreateApiKeyRequest create private void withRequestBody(final XContentBuilder builder, final UpdateApiKeyRequest updateApiKeyRequest) throws IOException { builder.startObject("apikey").field("id", updateApiKeyRequest.getId()); - if (updateApiKeyRequest.getRoleDescriptors() != null) { + withBaseUpdateApiKeyFields(builder, updateApiKeyRequest); + builder.endObject(); + } + + private void withRequestBody(final XContentBuilder builder, final BulkUpdateApiKeyRequest bulkUpdateApiKeyRequest) + throws IOException { + builder.startObject("apikeys").stringListField("ids", bulkUpdateApiKeyRequest.getIds()); + withBaseUpdateApiKeyFields(builder, bulkUpdateApiKeyRequest); + builder.endObject(); + } + + private void withBaseUpdateApiKeyFields(final XContentBuilder builder, final BaseUpdateApiKeyRequest baseUpdateApiKeyRequest) + throws IOException { + if (baseUpdateApiKeyRequest.getRoleDescriptors() != null) { builder.startArray("role_descriptors"); - for (RoleDescriptor roleDescriptor : updateApiKeyRequest.getRoleDescriptors()) { + for (RoleDescriptor roleDescriptor : baseUpdateApiKeyRequest.getRoleDescriptors()) { withRoleDescriptor(builder, roleDescriptor); } builder.endArray(); } - if (updateApiKeyRequest.getMetadata() != null) { + if (baseUpdateApiKeyRequest.getMetadata() != null) { // Include in entry even if metadata is empty. 
It's meaningful to track an empty metadata request parameter // because it replaces any metadata previously associated with the API key - builder.field("metadata", updateApiKeyRequest.getMetadata()); + builder.field("metadata", baseUpdateApiKeyRequest.getMetadata()); } - builder.endObject(); } private void withRoleDescriptor(XContentBuilder builder, RoleDescriptor roleDescriptor) throws IOException { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index 77345333629a1..e8dccb351623d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -46,6 +46,8 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -646,6 +648,39 @@ public void testSecurityConfigChangeEventFormattingForRoles() throws IOException // clear log CapturingLogger.output(logger.getName(), Level.INFO).clear(); + final List keyIds = randomList(1, 5, () -> randomAlphaOfLength(10)); + final var bulkUpdateApiKeyRequest = new BulkUpdateApiKeyRequest( + keyIds, + randomBoolean() ? null : keyRoleDescriptors, + metadataWithSerialization.metadata() + ); + auditTrail.accessGranted(requestId, authentication, BulkUpdateApiKeyAction.NAME, bulkUpdateApiKeyRequest, authorizationInfo); + final var expectedBulkUpdateKeyAuditEventString = """ + "change":{"apikeys":{"ids":[%s]%s%s}}\ + """.formatted( + bulkUpdateApiKeyRequest.getIds().stream().map("\"%s\""::formatted).collect(Collectors.joining(",")), + bulkUpdateApiKeyRequest.getRoleDescriptors() == null ? "" : "," + roleDescriptorsStringBuilder, + bulkUpdateApiKeyRequest.getMetadata() == null ? 
"" : ",\"metadata\":%s".formatted(metadataWithSerialization.serialization()) + ); + output = CapturingLogger.output(logger.getName(), Level.INFO); + assertThat(output.size(), is(2)); + String generatedBulkUpdateKeyAuditEventString = output.get(1); + assertThat(generatedBulkUpdateKeyAuditEventString, containsString(expectedBulkUpdateKeyAuditEventString)); + generatedBulkUpdateKeyAuditEventString = generatedBulkUpdateKeyAuditEventString.replace( + ", " + expectedBulkUpdateKeyAuditEventString, + "" + ); + checkedFields = new MapBuilder<>(commonFields); + checkedFields.remove(LoggingAuditTrail.ORIGIN_ADDRESS_FIELD_NAME); + checkedFields.remove(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME); + checkedFields.put("type", "audit") + .put(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, "security_config_change") + .put(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME, "change_apikeys") + .put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId); + assertMsg(generatedBulkUpdateKeyAuditEventString, checkedFields.map()); + // clear log + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + GrantApiKeyRequest grantApiKeyRequest = new GrantApiKeyRequest(); grantApiKeyRequest.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); grantApiKeyRequest.getGrant().setType(randomFrom(randomAlphaOfLength(8), null)); From 241f1bc2be274093879b8fb66dec1ccb13614ac1 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 2 Aug 2022 11:33:46 +0100 Subject: [PATCH 052/265] Wrap async ql task execution in new tracing context (#89029) Part of #84369. Split out from #88443. This PR wraps parts logic in `AsyncTaskManagementService` in a new tracing context. This is necessary so that a tracing implementation can use the thread context to propagate tracing headers, but without the code attempting to set the same key twice in the thread context, which is illegal. 
--- .../ql/async/AsyncTaskManagementService.java | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java index eacb25b10d622..d42f2619a166a 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java @@ -170,20 +170,22 @@ public void asyncExecute( ActionListener listener ) { String nodeId = clusterService.localNode().getId(); - @SuppressWarnings("unchecked") - T searchTask = (T) taskManager.register("transport", action + "[a]", new AsyncRequestWrapper(request, nodeId)); - boolean operationStarted = false; - try { - operation.execute( - request, - searchTask, - wrapStoringListener(searchTask, waitForCompletionTimeout, keepAlive, keepOnCompletion, listener) - ); - operationStarted = true; - } finally { - // If we didn't start operation for any reason, we need to clean up the task that we have created - if (operationStarted == false) { - taskManager.unregister(searchTask); + try (var ignored = threadPool.getThreadContext().newTraceContext()) { + @SuppressWarnings("unchecked") + T searchTask = (T) taskManager.register("transport", action + "[a]", new AsyncRequestWrapper(request, nodeId)); + boolean operationStarted = false; + try { + operation.execute( + request, + searchTask, + wrapStoringListener(searchTask, waitForCompletionTimeout, keepAlive, keepOnCompletion, listener) + ); + operationStarted = true; + } finally { + // If we didn't start operation for any reason, we need to clean up the task that we have created + if (operationStarted == false) { + taskManager.unregister(searchTask); + } } } } From 35e073695675b4a26ef04223aaf84b4bcbbb5485 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 2 Aug 2022 13:03:14 +0200 Subject: [PATCH 053/265] Make it explicit that test expects no rebalancing (#89028) This is required in case new shards allocator might be more proactive with rebalancing. 
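The heart of the change is a one-off cluster settings update at the start of each test. A minimal fragment of that setup (assuming the `ESIntegTestCase` helpers such as `client()` and `assertAcked`, and the static setting import used in the diff below):

```java
// Persistently disable shard rebalancing for the test cluster, so that an allocator which
// rebalances more eagerly cannot move the replica away after the no-op recovery the test
// asserts on. Mirrors the setup added in the diff below.
assertAcked(
    client().admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings(
            Settings.builder()
                .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
        )
);
```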
--- .../gateway/ReplicaShardAllocatorIT.java | 26 ++++++++++++------- .../ReplicaShardAllocatorSyncIdIT.java | 19 +++++++++++--- 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 597c030e86fb5..92d28927db24d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodesHelper; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.breaker.CircuitBreaker; @@ -40,8 +41,10 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -63,6 +66,16 @@ protected Collection> nodePlugins() { public void testPreferCopyCanPerformNoopRecovery() throws Exception { String indexName = "test"; String nodeWithPrimary = internalCluster().startNode(); + + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) + ) + ); + assertAcked( client().admin() .indices() @@ -113,6 +126,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { blockRecovery.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); + throw new AssertionError(e); } } connection.sendRequest(requestId, action, request, options); @@ -121,17 +135,9 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { recoveryStarted.await(); nodeWithReplica = internalCluster().startDataOnlyNode(nodeWithReplicaSettings); // AllocationService only calls GatewayAllocator if there are unassigned shards - assertAcked( - client().admin() - .indices() - .prepareCreate("dummy-index") - // make sure the new index does not get allocated to the node with the existing replica to prevent rebalancing from - // randomly moving the replica off of this node after the noop recovery - .setSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", nodeWithReplica)) - .setWaitForActiveShards(0) - ); + assertAcked(client().admin().indices().prepareCreate("dummy-index").setWaitForActiveShards(0)); ensureGreen(indexName); - assertThat(internalCluster().nodesInclude(indexName), hasItem(nodeWithReplica)); + assertThat(internalCluster().nodesInclude(indexName), containsInAnyOrder(nodeWithPrimary, nodeWithReplica)); assertNoOpRecoveries(indexName); blockRecovery.countDown(); transportServiceOnPrimary.clearAllRules(); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java index 74eb70f4453e3..1008eabd6cc9e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ReleasableLock; @@ -50,10 +51,11 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.IntStream; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; /** @@ -148,6 +150,16 @@ private void syncFlush(String index) throws IOException { public void testPreferCopyCanPerformNoopRecovery() throws Exception { String indexName = "test"; String nodeWithPrimary = internalCluster().startNode(); + + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) + ) + ); + assertAcked( client().admin() .indices() @@ -196,6 +208,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { blockRecovery.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); + throw new AssertionError(e); } } connection.sendRequest(requestId, action, request, options); @@ -203,10 +216,10 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { internalCluster().startDataOnlyNode(); recoveryStarted.await(); nodeWithReplica = internalCluster().startDataOnlyNode(nodeWithReplicaSettings); - // AllocationService only calls GatewayAllocator if there're unassigned shards + // AllocationService only calls GatewayAllocator if there are unassigned shards assertAcked(client().admin().indices().prepareCreate("dummy-index").setWaitForActiveShards(0)); ensureGreen(indexName); - assertThat(internalCluster().nodesInclude(indexName), hasItem(nodeWithReplica)); + assertThat(internalCluster().nodesInclude(indexName), containsInAnyOrder(nodeWithPrimary, nodeWithReplica)); assertNoOpRecoveries(indexName); blockRecovery.countDown(); transportServiceOnPrimary.clearAllRules(); From bf360a3d0c01e3f947ed297f6ed5b4ab2180f5df Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 2 Aug 2022 07:57:30 -0400 Subject: [PATCH 054/265] Synthetic source: add test for frozen (#89013) Adds a test for synthetic _source and searchable snapshots. 
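The behaviour the new YAML test pins down is that synthetic `_source` is rebuilt from doc values at fetch time, so a document indexed with the flattened key `obj.field` comes back as a nested object. A hedged, Java-flavoured equivalent of that assertion (a test fragment, assuming an integration-test `client()` and an index named `docs` mapped and populated as in the YAML body below):

```java
// The index "docs" is assumed to use synthetic _source with a keyword field "obj.field"
// and to contain one document indexed as { "obj.field": "foo" }, as in the YAML test below.
SearchResponse response = client().prepareSearch("docs").get();
Map<String, Object> source = response.getHits().getAt(0).getSourceAsMap();
// Synthetic source regroups the dotted field name into an object: { "obj": { "field": "foo" } }
assertEquals(Map.of("obj", Map.of("field", "foo")), source);
```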
--- .../20_synthetic_source.yml | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml new file mode 100644 index 0000000000000..8d9c138b9c161 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/searchable_snapshots/20_synthetic_source.yml @@ -0,0 +1,82 @@ +--- +setup: + - skip: + version: " - 8.4.99" + reason: added in 8.5 + + - do: + indices.create: + index: docs + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _source: + mode: synthetic + properties: + obj: + properties: + field: + type: keyword + + - do: + bulk: + body: + - index: + _index: docs + _id: "1" + - obj.field: foo + + - do: + snapshot.create_repository: + repository: repository-fs + body: + type: fs + settings: + location: "repository-fs" + + # Remove the snapshot if a previous test failed to delete it. + # Useful for third party tests that runs the test against a real external service. + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot + ignore: 404 + + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + + - do: + indices.delete: + index: docs + +--- +"Tests searchable snapshots usage stats": + - skip: + version: " - 8.4.99" + reason: added in 8.5 + + - do: + searchable_snapshots.mount: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + storage: shared_cache + body: + index: docs + + - do: + search: + index: docs + + - match: { hits.total.value: 1 } + - match: # synthetic source will push the dotted field name to an object + hits.hits.0._source: + obj: + field: foo + + From 1337de73e321ad5c8a60a8de115a23950723e479 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 2 Aug 2022 14:11:08 +0200 Subject: [PATCH 055/265] Mute testBlockClusterStateProcessingOnOneNode (#89038) Related to: #89015 --- .../cluster/coordination/CoordinationDiagnosticsServiceIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java index 1cf1b19359cbf..8819023d8d47d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -35,6 +35,7 @@ private void setBootstrapMasterNodeIndex() { internalCluster().setBootstrapMasterNodeIndex(0); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89015") public void testBlockClusterStateProcessingOnOneNode() throws Exception { /* * This test picks a node that is not elected master, and then blocks cluster state processing on it. The reason is so that we From 3a31c30329eddcd0eae69157e4eb84104eac64a7 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Tue, 2 Aug 2022 14:18:09 +0200 Subject: [PATCH 056/265] Ensure tests don't use flat polygons (#89002) * Ensure tests don't use flat polygons Polygons with colinear points cannot be tessellated. 
* Fixed test error message to not be box specific --- .../search/CartesianShapeQueryTestCase.java | 5 +-- .../search/CartesianShapeQueryTests.java | 1 - .../xpack/spatial/util/ShapeTestUtils.java | 15 ++++++- .../xpack/spatial/util/ShapeUtilTests.java | 45 +++++++++++++++++++ 4 files changed, 60 insertions(+), 6 deletions(-) create mode 100644 x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java index 6002ae583aaa1..ca7f59b703f28 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.spatial.search; -import org.apache.lucene.tests.geo.GeoTestUtil; import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.Line; -import org.elasticsearch.geometry.LinearRing; import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.Polygon; import org.elasticsearch.search.geo.BaseShapeQueryTestCase; @@ -71,8 +69,7 @@ protected Point nextPoint() { } protected Polygon nextPolygon() { - org.apache.lucene.geo.Polygon randomPoly = GeoTestUtil.nextPolygon(); - return new Polygon(new LinearRing(randomPoly.getPolyLons(), randomPoly.getPolyLats())); + return ShapeTestUtils.randomPolygon(false); } protected Polygon nextPolygon2() { diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTests.java index e12c9755409ae..0cf5818d10819 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTests.java @@ -21,7 +21,6 @@ protected Collection> getPlugins() { } @Override - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/88682") public void testQueryRandomGeoCollection() throws Exception { super.testQueryRandomGeoCollection(); } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeTestUtils.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeTestUtils.java index 5ef792665e2f7..a6f4f2e86ef76 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeTestUtils.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeTestUtils.java @@ -27,10 +27,13 @@ import java.util.function.Function; import static org.elasticsearch.geo.GeometryTestUtils.linearRing; +import static org.elasticsearch.test.ESTestCase.randomValueOtherThanMany; /** generates random cartesian shapes */ public class ShapeTestUtils { + public static final double MIN_VALID_AREA = 1e-10; + public static double randomValue() { return XShapeTestUtil.nextDouble(); } @@ -80,7 +83,7 @@ public static Line 
randomLine(boolean hasAlts) { } public static Polygon randomPolygon(boolean hasAlt) { - XYPolygon lucenePolygon = XShapeTestUtil.nextPolygon(); + XYPolygon lucenePolygon = randomValueOtherThanMany(p -> area(p) <= MIN_VALID_AREA, XShapeTestUtil::nextPolygon); if (lucenePolygon.numHoles() > 0) { XYPolygon[] luceneHoles = lucenePolygon.getHoles(); List holes = new ArrayList<>(); @@ -96,6 +99,16 @@ public static Polygon randomPolygon(boolean hasAlt) { return new Polygon(linearRing(floatsToDoubles(lucenePolygon.getPolyX()), floatsToDoubles(lucenePolygon.getPolyY()), hasAlt)); } + static double area(XYPolygon p) { + double windingSum = 0; + final int numPts = p.numPoints() - 1; + for (int i = 0; i < numPts; i++) { + // compute signed area + windingSum += p.getPolyX(i) * p.getPolyY(i + 1) - p.getPolyY(i) * p.getPolyX(i + 1); + } + return Math.abs(windingSum / 2); + } + static double[] floatsToDoubles(float[] f) { double[] d = new double[f.length]; for (int i = 0; i < f.length; i++) { diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java new file mode 100644 index 0000000000000..442c134c648b2 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.util; + +import org.apache.lucene.geo.XShapeTestUtil; +import org.apache.lucene.geo.XYPolygon; +import org.apache.lucene.geo.XYRectangle; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; + +public class ShapeUtilTests extends ESTestCase { + public void testBox() { + XYRectangle geom = XShapeTestUtil.nextBox(); + assertThat("Geometry minX should be less than maxX", geom.minX, lessThan(geom.maxX)); + assertThat("Geometry minY should be less than maxY", geom.minY, lessThan(geom.maxY)); + } + + public void testPolygon() { + XYPolygon geom = XShapeTestUtil.nextPolygon(); + assertThat("Geometry minX should be less than maxX", geom.minX, lessThan(geom.maxX)); + assertThat("Geometry minY should be less than maxY", geom.minY, lessThan(geom.maxY)); + assertThat("Geometry area should be non-zero", ShapeTestUtils.area(geom), greaterThan(0.0)); + } + + public void testFlatRectangle() { + XYPolygon geom = new XYPolygon( + new float[] { 54.69f, 54.69f, 180.0f, 180.0f, 54.69f }, + new float[] { -2.80E-33f, 5.85E-33f, 5.85E-33f, -2.80E-33f, -2.80E-33f } + ); + assertThat("Geometry minX should be less than maxX", geom.minX, lessThan(geom.maxX)); + assertThat("Geometry minY should be less than maxY", geom.minY, lessThan(geom.maxY)); + assertThat( + "This flat rectangle has area less than allowed threshold", + ShapeTestUtils.area(geom), + lessThan(ShapeTestUtils.MIN_VALID_AREA) + ); + } +} From 577c1c93d742e878939c9cde85f5056c760948fa Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 2 Aug 2022 13:45:34 +0100 Subject: [PATCH 057/265] Wrap async search action logic in a new trace context (#88937) Part of #84369. Split out from #88443. This PR wraps parts logic in `TransportSubmitAsyncSearchAction` in a new tracing context. 
This is necessary so that a tracing implementation can use the thread context to propagate tracing headers, but without the code attempting to set the same key twice in the thread context, which is illegal. --- .../TransportSubmitAsyncSearchAction.java | 108 +++++++++--------- 1 file changed, 55 insertions(+), 53 deletions(-) diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java index 1c9e40f4cfda1..0f7fd1867de8e 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java @@ -83,65 +83,67 @@ public TransportSubmitAsyncSearchAction( @Override protected void doExecute(Task submitTask, SubmitAsyncSearchRequest request, ActionListener submitListener) { final SearchRequest searchRequest = createSearchRequest(request, submitTask, request.getKeepAlive()); - AsyncSearchTask searchTask = (AsyncSearchTask) taskManager.register("transport", SearchAction.INSTANCE.name(), searchRequest); - searchAction.execute(searchTask, searchRequest, searchTask.getSearchProgressActionListener()); - searchTask.addCompletionListener(new ActionListener<>() { - @Override - public void onResponse(AsyncSearchResponse searchResponse) { - if (searchResponse.isRunning() || request.isKeepOnCompletion()) { - // the task is still running and the user cannot wait more so we create - // a document for further retrieval - try { - final String docId = searchTask.getExecutionId().getDocId(); - // creates the fallback response if the node crashes/restarts in the middle of the request - // TODO: store intermediate results ? - AsyncSearchResponse initialResp = searchResponse.clone(searchResponse.getId()); - store.createResponse(docId, searchTask.getOriginHeaders(), initialResp, new ActionListener<>() { - @Override - public void onResponse(IndexResponse r) { - if (searchResponse.isRunning()) { - try { - // store the final response on completion unless the submit is cancelled - searchTask.addCompletionListener( - finalResponse -> onFinalResponse(searchTask, finalResponse, () -> {}) - ); - } finally { - submitListener.onResponse(searchResponse); + try (var ignored = threadContext.newTraceContext()) { + AsyncSearchTask searchTask = (AsyncSearchTask) taskManager.register("transport", SearchAction.INSTANCE.name(), searchRequest); + searchAction.execute(searchTask, searchRequest, searchTask.getSearchProgressActionListener()); + searchTask.addCompletionListener(new ActionListener<>() { + @Override + public void onResponse(AsyncSearchResponse searchResponse) { + if (searchResponse.isRunning() || request.isKeepOnCompletion()) { + // the task is still running and the user cannot wait more so we create + // a document for further retrieval + try { + final String docId = searchTask.getExecutionId().getDocId(); + // creates the fallback response if the node crashes/restarts in the middle of the request + // TODO: store intermediate results ? 
+ AsyncSearchResponse initialResp = searchResponse.clone(searchResponse.getId()); + store.createResponse(docId, searchTask.getOriginHeaders(), initialResp, new ActionListener<>() { + @Override + public void onResponse(IndexResponse r) { + if (searchResponse.isRunning()) { + try { + // store the final response on completion unless the submit is cancelled + searchTask.addCompletionListener( + finalResponse -> onFinalResponse(searchTask, finalResponse, () -> {}) + ); + } finally { + submitListener.onResponse(searchResponse); + } + } else { + onFinalResponse(searchTask, searchResponse, () -> submitListener.onResponse(searchResponse)); } - } else { - onFinalResponse(searchTask, searchResponse, () -> submitListener.onResponse(searchResponse)); } - } - @Override - public void onFailure(Exception exc) { - onFatalFailure( - searchTask, - exc, - searchResponse.isRunning(), - "fatal failure: unable to store initial response", - submitListener - ); - } - }); - } catch (Exception exc) { - onFatalFailure(searchTask, exc, searchResponse.isRunning(), "fatal failure: generic error", submitListener); + @Override + public void onFailure(Exception exc) { + onFatalFailure( + searchTask, + exc, + searchResponse.isRunning(), + "fatal failure: unable to store initial response", + submitListener + ); + } + }); + } catch (Exception exc) { + onFatalFailure(searchTask, exc, searchResponse.isRunning(), "fatal failure: generic error", submitListener); + } + } else { + // the task completed within the timeout so the response is sent back to the user + // with a null id since nothing was stored on the cluster. + taskManager.unregister(searchTask); + submitListener.onResponse(searchResponse.clone(null)); } - } else { - // the task completed within the timeout so the response is sent back to the user - // with a null id since nothing was stored on the cluster. - taskManager.unregister(searchTask); - submitListener.onResponse(searchResponse.clone(null)); } - } - @Override - public void onFailure(Exception exc) { - // this will only ever be called if there is an issue scheduling the thread that executes - // the completion listener once the wait for completion timeout expires. - onFatalFailure(searchTask, exc, true, "fatal failure: addCompletionListener", submitListener); - } - }, request.getWaitForCompletionTimeout()); + @Override + public void onFailure(Exception exc) { + // this will only ever be called if there is an issue scheduling the thread that executes + // the completion listener once the wait for completion timeout expires. + onFatalFailure(searchTask, exc, true, "fatal failure: addCompletionListener", submitListener); + } + }, request.getWaitForCompletionTimeout()); + } } private SearchRequest createSearchRequest(SubmitAsyncSearchRequest request, Task submitTask, TimeValue keepAlive) { From 5f14c79320473605764840fd715ebbb31ec7491e Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 2 Aug 2022 14:45:14 +0100 Subject: [PATCH 058/265] Wrap ML model loading task in new tracing context (#89024) Part of #84369. ML uses the task framework to register a tasks for each loaded model. These tasks are not executed in the usual sense, and it does not make sense to trace them using APM. Therefore, make it possible to register a task without also starting tracing. 
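To make the new hook concrete, here is a minimal sketch of how a caller opts out of tracing with the overload added below. It assumes the `TaskManager` API from this diff; the wrapper class and method names are hypothetical.

```java
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskAwareRequest;
import org.elasticsearch.tasks.TaskManager;

final class UntracedTaskRegistrationSketch {
    // The trailing 'false' tells the task manager to skip startTrace(), so no tracing context
    // is started for this task. Useful for bookkeeping tasks (like loaded-model entries) that
    // are registered but never executed as ordinary requests.
    static Task registerWithoutTracing(TaskManager taskManager, String type, String action, TaskAwareRequest request) {
        return taskManager.register(type, action, request, false);
    }
}
```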
--- .../java/org/elasticsearch/tasks/TaskManager.java | 13 ++++++++++++- .../TrainedModelAssignmentNodeService.java | 3 ++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index ff3ca13dc9ce9..a7fd93127771a 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -120,6 +120,15 @@ public void setTaskCancellationService(TaskCancellationService taskCancellationS * Registers a task without parent task */ public Task register(String type, String action, TaskAwareRequest request) { + return register(type, action, request, true); + } + + /** + * Registers a task without a parent task, and specifies whether to trace the request. You should prefer + * to call {@link #register(String, String, TaskAwareRequest)}, since it is rare to want to avoid + * tracing a task. + */ + public Task register(String type, String action, TaskAwareRequest request, boolean traceRequest) { Map headers = new HashMap<>(); long headerSize = 0; long maxSize = maxHeaderSize.getBytes(); @@ -149,7 +158,9 @@ public Task register(String type, String action, TaskAwareRequest request) { } else { Task previousTask = tasks.put(task.getId(), task); assert previousTask == null; - startTrace(threadContext, task); + if (traceRequest) { + startTrace(threadContext, task); + } } return task; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 8c46427f6d249..700baef487fcb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -459,7 +459,8 @@ void prepareModelToLoad(StartTrainedModelDeploymentAction.TaskParams taskParams) TrainedModelDeploymentTask task = (TrainedModelDeploymentTask) taskManager.register( TRAINED_MODEL_ASSIGNMENT_TASK_TYPE, TRAINED_MODEL_ASSIGNMENT_TASK_ACTION, - taskAwareRequest(taskParams) + taskAwareRequest(taskParams), + false ); // threadsafe check to verify we are not loading/loaded the model if (modelIdToTask.putIfAbsent(taskParams.getModelId(), task) == null) { From 480479d288f2a4b3f5d48e1757027dfcdf643c57 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 2 Aug 2022 11:07:48 -0400 Subject: [PATCH 059/265] [ML] fix NLP inference_config bwc serialization tests (#89011) The tests were failing because of span not being nulled out for question_answering and text_similarity tasks. But, this change also attempts to make it more future proof so that if changes occur to the nlp task or tokenization configurations it will cause a failure more quickly and require handling the bwc testing. 
closes: #89008 --- .../ZeroShotClassificationConfig.java | 4 +- .../ZeroShotClassificationConfigUpdate.java | 4 +- .../InferenceConfigItemTestCase.java | 41 +++++++++++++++++++ .../ml/inference/TrainedModelConfigTests.java | 8 ++-- .../trainedmodel/BertTokenizationTests.java | 15 ++++++- .../trainedmodel/FillMaskConfigTests.java | 11 ++++- .../InferenceConfigTestScaffolding.java | 14 +++++++ .../trainedmodel/MPNetTokenizationTests.java | 15 ++++++- .../trainedmodel/NerConfigTests.java | 11 ++++- .../trainedmodel/PassThroughConfigTests.java | 10 ++++- .../QuestionAnsweringConfigTests.java | 14 +++++-- .../RobertaTokenizationTests.java | 15 ++++++- .../TextClassificationConfigTests.java | 41 ++++--------------- .../TextEmbeddingConfigTests.java | 10 ++++- .../TextSimilarityConfigTests.java | 13 ++++-- .../ZeroShotClassificationConfigTests.java | 14 ++++++- ...roShotClassificationConfigUpdateTests.java | 6 +-- .../nlp/ZeroShotClassificationProcessor.java | 6 +-- 18 files changed, 191 insertions(+), 61 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java index 3ee09ffc1e837..710a2855167cf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java @@ -248,8 +248,8 @@ public String getHypothesisTemplate() { return hypothesisTemplate; } - public List getLabels() { - return Optional.ofNullable(labels).orElse(List.of()); + public Optional> getLabels() { + return Optional.ofNullable(labels); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java index 3cf9f8c8f8354..acfd726ca27a5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java @@ -147,13 +147,13 @@ public InferenceConfig apply(InferenceConfig originalConfig) { tokenizationUpdate == null ? 
zeroShotConfig.getTokenization() : tokenizationUpdate.apply(zeroShotConfig.getTokenization()), zeroShotConfig.getHypothesisTemplate(), Optional.ofNullable(isMultiLabel).orElse(zeroShotConfig.isMultiLabel()), - Optional.ofNullable(labels).orElse(zeroShotConfig.getLabels()), + Optional.ofNullable(labels).orElse(zeroShotConfig.getLabels().orElse(null)), Optional.ofNullable(resultsField).orElse(zeroShotConfig.getResultsField()) ); } boolean isNoop(ZeroShotClassificationConfig originalConfig) { - return (labels == null || labels.equals(originalConfig.getLabels())) + return (labels == null || labels.equals(originalConfig.getLabels().orElse(null))) && (isMultiLabel == null || isMultiLabel.equals(originalConfig.isMultiLabel())) && (resultsField == null || resultsField.equals(originalConfig.getResultsField())) && super.isNoop(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java index 79157bcb5ab27..37b37940a5780 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java @@ -15,6 +15,24 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.FillMaskConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.FillMaskConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.QuestionAnsweringConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.QuestionAnsweringConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ZeroShotClassificationConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ZeroShotClassificationConfigTests; import java.util.ArrayList; import java.util.Collections; @@ -25,6 +43,29 @@ public abstract class InferenceConfigItemTestCase extends AbstractBWCSerializationTestCase< T> { + + static InferenceConfig mutateForVersion(NlpConfig inferenceConfig, Version version) { + if (inferenceConfig instanceof TextClassificationConfig textClassificationConfig) { + return TextClassificationConfigTests.mutateForVersion(textClassificationConfig, version); + } else if (inferenceConfig instanceof 
FillMaskConfig fillMaskConfig) { + return FillMaskConfigTests.mutateForVersion(fillMaskConfig, version); + } else if (inferenceConfig instanceof QuestionAnsweringConfig questionAnsweringConfig) { + return QuestionAnsweringConfigTests.mutateForVersion(questionAnsweringConfig, version); + } else if (inferenceConfig instanceof NerConfig nerConfig) { + return NerConfigTests.mutateForVersion(nerConfig, version); + } else if (inferenceConfig instanceof PassThroughConfig passThroughConfig) { + return PassThroughConfigTests.mutateForVersion(passThroughConfig, version); + } else if (inferenceConfig instanceof TextEmbeddingConfig textEmbeddingConfig) { + return TextEmbeddingConfigTests.mutateForVersion(textEmbeddingConfig, version); + } else if (inferenceConfig instanceof TextSimilarityConfig textSimilarityConfig) { + return TextSimilarityConfigTests.mutateForVersion(textSimilarityConfig, version); + } else if (inferenceConfig instanceof ZeroShotClassificationConfig zeroShotClassificationConfig) { + return ZeroShotClassificationConfigTests.mutateForVersion(zeroShotClassificationConfig, version); + } else { + throw new IllegalArgumentException("unknown inference config [" + inferenceConfig.getName() + "]"); + } + } + @Override protected NamedXContentRegistry xContentRegistry() { List namedXContent = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java index 90b37a67cf6f8..bbff114de5d4c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core.ml.inference; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesReference; @@ -29,10 +28,10 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.IndexLocationTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.QuestionAnsweringConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfigTests; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfigTests; @@ -60,7 +59,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89008") public class TrainedModelConfigTests extends AbstractBWCSerializationTestCase { private boolean lenient; @@ -397,8 +395,8 @@ protected TrainedModelConfig mutateInstanceForVersion(TrainedModelConfig instanc builder.setModelType(null); builder.setLocation(null); } - if (instance.getInferenceConfig()instanceof 
TextClassificationConfig textClassificationConfig) { - builder.setInferenceConfig(TextClassificationConfigTests.mutateInstance(textClassificationConfig, version)); + if (instance.getInferenceConfig()instanceof NlpConfig nlpConfig) { + builder.setInferenceConfig(InferenceConfigItemTestCase.mutateForVersion(nlpConfig, version)); } return builder.build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java index 9a84c254c5452..952e6b4372534 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java @@ -19,6 +19,19 @@ public class BertTokenizationTests extends AbstractBWCSerializationTestCase getRandomFieldsExcludeFilter() { return field -> field.isEmpty() == false; @@ -44,7 +53,7 @@ protected FillMaskConfig createTestInstance() { @Override protected FillMaskConfig mutateInstanceForVersion(FillMaskConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static FillMaskConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java index 43020fe23e114..228cdb40e3a89 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java @@ -7,8 +7,22 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; +import org.elasticsearch.Version; + public final class InferenceConfigTestScaffolding { + static Tokenization mutateTokenizationForVersion(Tokenization tokenization, Version version) { + if (tokenization instanceof BertTokenization bertTokenization) { + return BertTokenizationTests.mutateForVersion(bertTokenization, version); + } else if (tokenization instanceof MPNetTokenization mpNetTokenization) { + return MPNetTokenizationTests.mutateForVersion(mpNetTokenization, version); + } else if (tokenization instanceof RobertaTokenization robertaTokenization) { + return RobertaTokenizationTests.mutateForVersion(robertaTokenization, version); + } else { + throw new IllegalArgumentException("unknown tokenization [" + tokenization.getName() + "]"); + } + } + static Tokenization cloneWithNewTruncation(Tokenization tokenization, Tokenization.Truncate truncate) { if (tokenization instanceof MPNetTokenization) { return new MPNetTokenization( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java index 4c01935a7ef43..dead82c736445 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java @@ -19,6 +19,19 @@ public class MPNetTokenizationTests extends 
AbstractBWCSerializationTestCase { + public static NerConfig mutateForVersion(NerConfig instance, Version version) { + return new NerConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getClassificationLabels(), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -48,7 +57,7 @@ protected NerConfig createTestInstance() { @Override protected NerConfig mutateInstanceForVersion(NerConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static NerConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java index 3701a07b73d5b..28e107101d288 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java @@ -17,6 +17,14 @@ public class PassThroughConfigTests extends InferenceConfigItemTestCase { + public static PassThroughConfig mutateForVersion(PassThroughConfig instance, Version version) { + return new PassThroughConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -44,7 +52,7 @@ protected PassThroughConfig createTestInstance() { @Override protected PassThroughConfig mutateInstanceForVersion(PassThroughConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static PassThroughConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java index 4f3b09259f8f9..0f8f2f0783660 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; @@ -16,9 +15,18 @@ import java.io.IOException; import java.util.function.Predicate; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89008") public class QuestionAnsweringConfigTests extends InferenceConfigItemTestCase { + public static QuestionAnsweringConfig mutateForVersion(QuestionAnsweringConfig instance, Version version) { + return new QuestionAnsweringConfig( + instance.getNumTopClasses(), + instance.getMaxAnswerLength(), + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -46,7 +54,7 @@ protected 
QuestionAnsweringConfig createTestInstance() { @Override protected QuestionAnsweringConfig mutateInstanceForVersion(QuestionAnsweringConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static QuestionAnsweringConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java index 0803fec7304bc..920933be7450e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java @@ -19,6 +19,19 @@ public class RobertaTokenizationTests extends AbstractBWCSerializationTestCase { - public static TextClassificationConfig mutateInstance(TextClassificationConfig instance, Version version) { - if (version.before(Version.V_8_2_0)) { - final Tokenization tokenization; - if (instance.getTokenization() instanceof BertTokenization) { - tokenization = new BertTokenization( - instance.getTokenization().doLowerCase, - instance.getTokenization().withSpecialTokens, - instance.getTokenization().maxSequenceLength, - instance.getTokenization().truncate, - null - ); - } else if (instance.getTokenization() instanceof MPNetTokenization) { - tokenization = new MPNetTokenization( - instance.getTokenization().doLowerCase, - instance.getTokenization().withSpecialTokens, - instance.getTokenization().maxSequenceLength, - instance.getTokenization().truncate, - null - ); - } else { - throw new UnsupportedOperationException("unknown tokenization type: " + instance.getTokenization().getName()); - } - return new TextClassificationConfig( - instance.getVocabularyConfig(), - tokenization, - instance.getClassificationLabels(), - instance.getNumTopClasses(), - instance.getResultsField() - ); - } - return instance; + public static TextClassificationConfig mutateForVersion(TextClassificationConfig instance, Version version) { + return new TextClassificationConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getClassificationLabels(), + instance.getNumTopClasses(), + instance.getResultsField() + ); } @Override @@ -81,7 +58,7 @@ protected TextClassificationConfig createTestInstance() { @Override protected TextClassificationConfig mutateInstanceForVersion(TextClassificationConfig instance, Version version) { - return mutateInstance(instance, version); + return mutateForVersion(instance, version); } public void testInvalidClassificationLabels() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java index 373f3d3102e15..d60a8b28107da 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java @@ -17,6 +17,14 @@ public class TextEmbeddingConfigTests extends InferenceConfigItemTestCase { + public static TextEmbeddingConfig mutateForVersion(TextEmbeddingConfig instance, Version version) { + return new 
TextEmbeddingConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -44,7 +52,7 @@ protected TextEmbeddingConfig createTestInstance() { @Override protected TextEmbeddingConfig mutateInstanceForVersion(TextEmbeddingConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static TextEmbeddingConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java index 77dd5dcf38d61..e8976ce1dd7c5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; @@ -17,9 +16,17 @@ import java.util.Arrays; import java.util.function.Predicate; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89008") public class TextSimilarityConfigTests extends InferenceConfigItemTestCase { + public static TextSimilarityConfig mutateForVersion(TextSimilarityConfig instance, Version version) { + return new TextSimilarityConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getResultsField(), + instance.getSpanScoreFunction().toString() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -47,7 +54,7 @@ protected TextSimilarityConfig createTestInstance() { @Override protected TextSimilarityConfig mutateInstanceForVersion(TextSimilarityConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static TextSimilarityConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java index 63b271c04dffb..48e4b25ea7316 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java @@ -18,6 +18,18 @@ public class ZeroShotClassificationConfigTests extends InferenceConfigItemTestCase { + public static ZeroShotClassificationConfig mutateForVersion(ZeroShotClassificationConfig instance, Version version) { + return new ZeroShotClassificationConfig( + instance.getClassificationLabels(), + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getHypothesisTemplate(), + instance.isMultiLabel(), + instance.getLabels().orElse(null), + instance.getResultsField() + ); + } + @Override protected boolean 
supportsUnknownFields() { return true; @@ -45,7 +57,7 @@ protected ZeroShotClassificationConfig createTestInstance() { @Override protected ZeroShotClassificationConfig mutateInstanceForVersion(ZeroShotClassificationConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static ZeroShotClassificationConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java index 7aa80885ed7f4..2d424edac4c94 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java @@ -125,7 +125,7 @@ public void testApply() { originalConfig.getTokenization(), originalConfig.getHypothesisTemplate(), true, - originalConfig.getLabels(), + originalConfig.getLabels().orElse(null), originalConfig.getResultsField() ), equalTo(new ZeroShotClassificationConfigUpdate.Builder().setMultiLabel(true).build().apply(originalConfig)) @@ -137,7 +137,7 @@ public void testApply() { originalConfig.getTokenization(), originalConfig.getHypothesisTemplate(), originalConfig.isMultiLabel(), - originalConfig.getLabels(), + originalConfig.getLabels().orElse(null), "updated-field" ), equalTo(new ZeroShotClassificationConfigUpdate.Builder().setResultsField("updated-field").build().apply(originalConfig)) @@ -152,7 +152,7 @@ public void testApply() { tokenization, originalConfig.getHypothesisTemplate(), originalConfig.isMultiLabel(), - originalConfig.getLabels(), + originalConfig.getLabels().orElse(null), originalConfig.getResultsField() ), equalTo( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java index e19529b705d77..eff6916d61609 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java @@ -52,7 +52,7 @@ public class ZeroShotClassificationProcessor extends NlpTask.Processor { "zero_shot_classification requires [entailment] and [contradiction] in classification_labels" ); } - this.labels = Optional.ofNullable(config.getLabels()).orElse(List.of()).toArray(String[]::new); + this.labels = config.getLabels().orElse(List.of()).toArray(String[]::new); this.hypothesisTemplate = config.getHypothesisTemplate(); this.isMultiLabel = config.isMultiLabel(); this.resultsField = config.getResultsField(); @@ -67,7 +67,7 @@ public void validateInputs(List inputs) { public NlpTask.RequestBuilder getRequestBuilder(NlpConfig nlpConfig) { final String[] labelsValue; if (nlpConfig instanceof ZeroShotClassificationConfig zeroShotConfig) { - labelsValue = zeroShotConfig.getLabels().toArray(new String[0]); + labelsValue = zeroShotConfig.getLabels().orElse(List.of()).toArray(new String[0]); } else { labelsValue = this.labels; } @@ -83,7 +83,7 @@ public NlpTask.ResultProcessor getResultProcessor(NlpConfig nlpConfig) { final boolean isMultiLabelValue; final String resultsFieldValue; if (nlpConfig instanceof 
ZeroShotClassificationConfig zeroShotConfig) { - labelsValue = zeroShotConfig.getLabels().toArray(new String[0]); + labelsValue = zeroShotConfig.getLabels().orElse(List.of()).toArray(new String[0]); isMultiLabelValue = zeroShotConfig.isMultiLabel(); resultsFieldValue = zeroShotConfig.getResultsField(); } else { From d9dc3a9629f699ca32d4268a4783e8d4098fc96f Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 2 Aug 2022 17:30:18 +0200 Subject: [PATCH 060/265] Preemptively initialize routing nodes and indices lookup on all node types (#89032) Follow up to #89005 running the initialization as soon as possible on non-master nodes as well. --- docs/changelog/89032.yaml | 5 ++++ .../elasticsearch/cluster/ClusterState.java | 14 +++++++++++ .../cluster/coordination/Coordinator.java | 25 +++++++++++-------- .../cluster/metadata/Metadata.java | 4 +++ .../cluster/service/MasterService.java | 3 +-- .../FakeThreadPoolMasterServiceTests.java | 3 +-- 6 files changed, 39 insertions(+), 15 deletions(-) create mode 100644 docs/changelog/89032.yaml diff --git a/docs/changelog/89032.yaml b/docs/changelog/89032.yaml new file mode 100644 index 0000000000000..b841adee057e2 --- /dev/null +++ b/docs/changelog/89032.yaml @@ -0,0 +1,5 @@ +pr: 89032 +summary: Preemptively initialize routing nodes and indices lookup on all node types +area: Cluster Coordination +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index d64df93812ac0..60008d3f736ca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -54,6 +54,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.Executor; import java.util.function.Consumer; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -351,6 +352,19 @@ public RoutingNodes mutableRoutingNodes() { return RoutingNodes.mutable(routingTable, this.nodes); } + /** + * Initialize data structures that lazy computed for this instance in the background by using the giving executor. 
+ * @param executor executor to run initialization tasks on + */ + public void initializeAsync(Executor executor) { + if (routingNodes == null) { + executor.execute(this::getRoutingNodes); + } + if (metadata.indicesLookupInitialized() == false) { + executor.execute(metadata::getIndicesLookup); + } + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 957b3cf37c03d..f583bf090d59f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -414,8 +414,14 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { assert publishRequest.getAcceptedState().nodes().getLocalNode().equals(getLocalNode()) : publishRequest.getAcceptedState().nodes().getLocalNode() + " != " + getLocalNode(); + final ClusterState newClusterState = publishRequest.getAcceptedState(); + if (newClusterState.nodes().isLocalNodeElectedMaster() == false) { + // background initialization on the current master has been started by the master service already + newClusterState.initializeAsync(transportService.getThreadPool().generic()); + } + synchronized (mutex) { - final DiscoveryNode sourceNode = publishRequest.getAcceptedState().nodes().getMasterNode(); + final DiscoveryNode sourceNode = newClusterState.nodes().getMasterNode(); logger.trace("handlePublishRequest: handling [{}] from [{}]", publishRequest, sourceNode); if (sourceNode.equals(getLocalNode()) && mode != Mode.LEADER) { @@ -427,30 +433,30 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { final ClusterState localState = coordinationState.get().getLastAcceptedState(); if (localState.metadata().clusterUUIDCommitted() - && localState.metadata().clusterUUID().equals(publishRequest.getAcceptedState().metadata().clusterUUID()) == false) { + && localState.metadata().clusterUUID().equals(newClusterState.metadata().clusterUUID()) == false) { logger.warn( "received cluster state from {} with a different cluster uuid {} than local cluster uuid {}, rejecting", sourceNode, - publishRequest.getAcceptedState().metadata().clusterUUID(), + newClusterState.metadata().clusterUUID(), localState.metadata().clusterUUID() ); throw new CoordinationStateRejectedException( "received cluster state from " + sourceNode + " with a different cluster uuid " - + publishRequest.getAcceptedState().metadata().clusterUUID() + + newClusterState.metadata().clusterUUID() + " than local cluster uuid " + localState.metadata().clusterUUID() + ", rejecting" ); } - if (publishRequest.getAcceptedState().term() > localState.term()) { + if (newClusterState.term() > localState.term()) { // only do join validation if we have not accepted state from this master yet - onJoinValidators.forEach(a -> a.accept(getLocalNode(), publishRequest.getAcceptedState())); + onJoinValidators.forEach(a -> a.accept(getLocalNode(), newClusterState)); } - ensureTermAtLeast(sourceNode, publishRequest.getAcceptedState().term()); + ensureTermAtLeast(sourceNode, newClusterState.term()); final PublishResponse publishResponse = coordinationState.get().handlePublishRequest(publishRequest); if (sourceNode.equals(getLocalNode())) { @@ -459,10 +465,7 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { becomeFollower("handlePublishRequest", sourceNode); 
// also updates preVoteCollector } - return new PublishWithJoinResponse( - publishResponse, - joinWithDestination(lastJoin, sourceNode, publishRequest.getAcceptedState().term()) - ); + return new PublishWithJoinResponse(publishResponse, joinWithDestination(lastJoin, sourceNode, newClusterState.term())); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 38d39d64cab4b..c18bdb6e97d1c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -509,6 +509,10 @@ public boolean equalsAliases(Metadata other) { return true; } + public boolean indicesLookupInitialized() { + return indicesLookup != null; + } + public SortedMap getIndicesLookup() { SortedMap lookup = indicesLookup; if (lookup == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 906348374c182..922b39ac71fd9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -332,8 +332,7 @@ public String getDescription() { logger.debug("publishing cluster state version [{}]", newClusterState.version()); // initialize routing nodes and the indices lookup concurrently, we will need both of them for the cluster state // application and can compute them while we wait for the other nodes during publication - threadPool.generic().execute(newClusterState::getRoutingNodes); - threadPool.generic().execute(newClusterState.metadata()::getIndicesLookup); + newClusterState.initializeAsync(threadPool.generic()); publish( clusterStatePublicationEvent, new CompositeTaskAckListener( diff --git a/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java b/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java index 72d80438e05bc..535c662dc9b80 100644 --- a/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterServiceTests.java @@ -142,8 +142,7 @@ public void onFailure(Exception e) { runnableTasks.remove(0).run(); // schedule again - // run tasks for computing routing nodes and indices lookup - runnableTasks.remove(0).run(); + // run task for computing missing indices lookup runnableTasks.remove(0).run(); runnableTasks.remove(0).run(); // publish again From 9ce59bb7a982197cbeebbc7c45a3ded952821eae Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 2 Aug 2022 12:17:14 -0400 Subject: [PATCH 061/265] [ML] add text_similarity nlp task documentation (#88994) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduced in: #88439 * [ML] add text_similarity nlp task documentation * Apply suggestions from code review Co-authored-by: István Zoltán Szabó * Update docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc Co-authored-by: István Zoltán Szabó * Apply suggestions from code review Co-authored-by: István Zoltán Szabó * Update docs/reference/ml/ml-shared.asciidoc Co-authored-by: István Zoltán Szabó Co-authored-by: István Zoltán Szabó --- docs/reference/ml/ml-shared.asciidoc | 24 +++- .../apis/get-trained-models.asciidoc | 112 
+++++++++++++++++ .../apis/infer-trained-model.asciidoc | 89 ++++++++++++-- .../apis/put-trained-models.asciidoc | 113 +++++++++++++++++- 4 files changed, 320 insertions(+), 18 deletions(-) diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index b2e5f02c366f5..d3d73460b48d7 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -1051,8 +1051,8 @@ results are returned to the caller. end::inference-config-pass-through[] tag::inference-config-nlp-question-answering[] -Configures a question answering natural language processing (NLP) task. Question -answering is useful for extracting answers for certain questions from a large +Configures a question answering natural language processing (NLP) task. Question +answering is useful for extracting answers for certain questions from a large corpus of text. end::inference-config-nlp-question-answering[] @@ -1070,6 +1070,26 @@ context. These embeddings can be used in a <> field for powerful insights. end::inference-config-text-embedding[] +tag::inference-config-text-similarity[] +Text similarity takes an input sequence and compares it with another input sequence. This is commonly referred to +as cross-encoding. This task is useful for ranking document text when comparing it to another provided text input. +end::inference-config-text-similarity[] + +tag::inference-config-text-similarity-span-score-func[] +Identifies how to combine the resulting similarity score when a provided text passage is longer than `max_sequence_length` and must be +automatically separated for multiple calls. This only is applicable when `truncate` is `none` and `span` is a non-negative +number. The default value is `max`. Available options are: ++ +-- +* `max`: The maximum score from all the spans is returned. +* `mean`: The mean score over all the spans is returned. +-- +end::inference-config-text-similarity-span-score-func[] + +tag::inference-config-text-similarity-text[] +This is the text with which to compare all document provided text inputs. +end::inference-config-text-similarity-text[] + tag::inference-config-regression-num-top-feature-importance-values[] Specifies the maximum number of {ml-docs}/ml-feature-importance.html[{feat-imp}] values per document. diff --git a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc index 5275671c7c411..79c19b7c5b9f3 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc @@ -674,6 +674,118 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenizati (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +`with_special_tokens`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] +======== +======= +`vocabulary`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-vocabulary] ++ +.Properties of vocabulary +[%collapsible%open] +======= +`index`:::: +(Required, string) +The index where the vocabulary is stored. 
+======= +====== +`text_similarity`:::: +(Object, optional) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] ++ +.Properties of text_similarity inference +[%collapsible%open] +====== +`span_score_combination_function`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func] + +`tokenization`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] ++ +.Properties of tokenization +[%collapsible%open] +======= +`bert`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] ++ +.Properties of bert +[%collapsible%open] +======== +`do_lower_case`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] + +`max_sequence_length`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] + +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] + +`with_special_tokens`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] +======== +`roberta`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] ++ +.Properties of roberta +[%collapsible%open] +======== +`add_prefix_space`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] + +`max_sequence_length`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] + +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] + +`with_special_tokens`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] +======== +`mpnet`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] ++ +.Properties of mpnet +[%collapsible%open] +======== +`do_lower_case`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] + +`max_sequence_length`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] + +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] + `with_special_tokens`:::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] diff --git a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc index 87781ecb18d8f..b86e2e370ac79 100644 --- a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc +++ 
b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc @@ -46,20 +46,20 @@ Controls the amount of time to wait for {infer} results. Defaults to 10 seconds. `docs`:: (Required, array) An array of objects to pass to the model for inference. The objects should -contain the fields matching your configured trained model input. Typically for -NLP models, the field name is `text_field`. Currently for NLP models, only a -single value is allowed. For {dfanalytics} or imported classification or +contain the fields matching your configured trained model input. Typically for +NLP models, the field name is `text_field`. Currently for NLP models, only a +single value is allowed. For {dfanalytics} or imported classification or regression models, more than one value is allowed. //Begin inference_config `inference_config`:: (Required, object) The default configuration for inference. This can be: `regression`, -`classification`, `fill_mask`, `ner`, `question_answering`, +`classification`, `fill_mask`, `ner`, `question_answering`, `text_classification`, `text_embedding` or `zero_shot_classification`. If `regression` or `classification`, it must match the `target_type` of the -underlying `definition.trained_model`. If `fill_mask`, `ner`, -`question_answering`, `text_classification`, or `text_embedding`; the +underlying `definition.trained_model`. If `fill_mask`, `ner`, +`question_answering`, `text_classification`, or `text_embedding`; the `model_type` must be `pytorch`. + .Properties of `inference_config` @@ -286,7 +286,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Recommended to set `max_sequence_length` to `386` with `128` of `span` and set +Recommended to set `max_sequence_length` to `386` with `128` of `span` and set `truncate` to `none`. 
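For example, a question answering inference call that applies this windowing recommendation might look like the following sketch; the model ID, question, and document text are placeholders rather than values taken from this change, and only the inference-time tokenization overrides (`span`, `truncate`) are shown:

[source,console]
--------------------------------------------------
POST _ml/trained_models/my-qa-model/_infer
{
  "docs": [
    {
      "text_field": "Elasticsearch is a distributed search and analytics engine built on Apache Lucene."
    }
  ],
  "inference_config": {
    "question_answering": {
      "question": "What is Elasticsearch built on?",
      "tokenization": {
        "bert": {
          "span": 128,
          "truncate": "none"
        }
      }
    }
  }
}
--------------------------------------------------
// TEST[skip:placeholder model ID]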
+ .Properties of tokenization @@ -475,6 +475,75 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenizati .Properties of mpnet [%collapsible%open] ======= +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +======= +====== +===== +`text_similarity`:::: +(Object, optional) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] ++ +.Properties of text_similarity inference +[%collapsible%open] +===== +`span_score_combination_function`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func] + +`text`:::: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-text] + +`tokenization`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] ++ +.Properties of tokenization +[%collapsible%open] +====== +`bert`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] ++ +.Properties of bert +[%collapsible%open] +======= +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`with_special_tokens`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] +======= +`roberta`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] ++ +.Properties of roberta +[%collapsible%open] +======= +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +======= +`mpnet`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] ++ +.Properties of mpnet +[%collapsible%open] +======= +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + `truncate`:::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] @@ -654,7 +723,7 @@ The API returns in this case: ---- // NOTCONSOLE -Zero-shot classification models require extra configuration defining the class +Zero-shot classification models require extra configuration defining the class labels. These labels are passed in the zero-shot inference config. [source,console] @@ -681,7 +750,7 @@ POST _ml/trained_models/model2/_infer -------------------------------------------------- // TEST[skip:TBD] -The API returns the predicted label and the confidence, as well as the top +The API returns the predicted label and the confidence, as well as the top classes: [source,console-result] @@ -717,7 +786,7 @@ classes: ---- // NOTCONSOLE -Question answering models require extra configuration defining the question to +Question answering models require extra configuration defining the question to answer. 
[source,console] diff --git a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc index 3b31f1c01626a..73dd7294ce0a2 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc @@ -384,11 +384,11 @@ the model definition is not supplied. `inference_config`:: (Required, object) The default configuration for inference. This can be: `regression`, -`classification`, `fill_mask`, `ner`, `question_answering`, +`classification`, `fill_mask`, `ner`, `question_answering`, `text_classification`, `text_embedding` or `zero_shot_classification`. If `regression` or `classification`, it must match the `target_type` of the -underlying `definition.trained_model`. If `fill_mask`, `ner`, -`question_answering`, `text_classification`, or `text_embedding`; the +underlying `definition.trained_model`. If `fill_mask`, `ner`, +`question_answering`, `text_classification`, or `text_embedding`; the `model_type` must be `pytorch`. + .Properties of `inference_config` @@ -525,9 +525,9 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-ner] ===== `classification_labels`:::: (Optional, string) -An array of classification labels. NER only supports Inside-Outside-Beginning +An array of classification labels. NER only supports Inside-Outside-Beginning labels (IOB) and only persons, organizations, locations, and miscellaneous. -Example: ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", +Example: ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"] `results_field`:::: @@ -722,7 +722,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Recommended to set `max_sentence_length` to `386` with `128` of `span` and set +Recommended to set `max_sentence_length` to `386` with `128` of `span` and set `truncate` to `none`. 
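A hypothetical model configuration following this recommendation might look like the sketch below; the model ID `my-qa-model` is a placeholder and the separate model definition and vocabulary upload steps are omitted:

[source,console]
--------------------------------------------------
PUT _ml/trained_models/my-qa-model
{
  "model_type": "pytorch",
  "input": {
    "field_names": ["text_field"]
  },
  "inference_config": {
    "question_answering": {
      "max_answer_length": 10,
      "tokenization": {
        "bert": {
          "max_sequence_length": 386,
          "span": 128,
          "truncate": "none"
        }
      }
    }
  }
}
--------------------------------------------------
// TEST[skip:placeholder model ID]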
+ .Properties of tokenization @@ -1015,6 +1015,107 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenizati (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +`with_special_tokens`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] +======= +====== +===== +`text_similarity`:::: +(Object, optional) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] ++ +.Properties of text_similarity inference +[%collapsible%open] +===== +`span_score_combination_function`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func] + +`tokenization`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] ++ +.Properties of tokenization +[%collapsible%open] +====== +`bert`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] ++ +.Properties of bert +[%collapsible%open] +======= +`do_lower_case`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] + +`max_sequence_length`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] + +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] + +`with_special_tokens`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] +======= +`roberta`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] ++ +.Properties of roberta +[%collapsible%open] +======= +`add_prefix_space`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] + +`max_sequence_length`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] + +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] + +`with_special_tokens`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] +======= +`mpnet`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] ++ +.Properties of mpnet +[%collapsible%open] +======= +`do_lower_case`:::: +(Optional, boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] + +`max_sequence_length`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] + +`span`:::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] + 
`with_special_tokens`:::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] From 9b0230313820bc95567147877212d0a3180e311c Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Tue, 2 Aug 2022 19:05:11 +0200 Subject: [PATCH 062/265] Docs: Remove paragraph that applies only before Elasticsearch 7.0 (#86209) --- docs/reference/ccr/getting-started.asciidoc | 8 -------- 1 file changed, 8 deletions(-) diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 7b8a938132800..c5a2ac98e4e32 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -159,14 +159,6 @@ cluster with cluster alias `leader`. connected to. ==== -[[ccr-enable-soft-deletes]] -==== Enable soft deletes on leader indices -To follow an index, it must have been created with -<> enabled. If the index doesn’t have -soft deletes enabled, you must reindex it and use the new index as the leader -index. Soft deletes are enabled by default on new indices -created with {es} 7.0.0 and later. - include::../../../x-pack/docs/en/security/authentication/remote-clusters-privileges.asciidoc[tag=configure-ccr-privileges] [[ccr-getting-started-follower-index]] From 3bb4a84bdd38f280f751990954930c24f4f6f889 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 2 Aug 2022 10:13:58 -0700 Subject: [PATCH 063/265] Support source fallback for double, float, and half_float field types (#89010) This change adds a SourceValueFetcherSortedDoubleIndexFieldData to support double doc values types for source fallback. This also adds support for double, float and half_float field types. --- docs/changelog/89010.yaml | 5 + .../test/painless/50_script_doc_values.yml | 291 ++++++++++++++++++ ...lueFetcherMultiGeoPointIndexFieldData.java | 6 +- ...alueFetcherSortedDoubleIndexFieldData.java | 131 ++++++++ ...lueFetcherSortedNumericIndexFieldData.java | 1 + .../index/mapper/NumberFieldMapper.java | 46 +++ 6 files changed, 479 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/89010.yaml create mode 100644 server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java diff --git a/docs/changelog/89010.yaml b/docs/changelog/89010.yaml new file mode 100644 index 0000000000000..02d91128a12e3 --- /dev/null +++ b/docs/changelog/89010.yaml @@ -0,0 +1,5 @@ +pr: 89010 +summary: "Support source fallback for double, float, and `half_float` field types" +area: Mapping +type: enhancement +issues: [] diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 761787365d38e..8167de737fcb3 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -47,10 +47,19 @@ setup: doc_values: false double: type: double + double_no_doc_values: + type: double + doc_values: false float: type: float + float_no_doc_values: + type: float + doc_values: false half_float: type: half_float + half_float_no_doc_values: + type: half_float + doc_values: false scaled_float: type: scaled_float scaling_factor: 100 @@ -86,8 +95,11 @@ setup: byte: 12 byte_no_doc_values: 12 double: 3.14159265358979 + double_no_doc_values: 3.14159265358979 float: 
3.141592654 + float_no_doc_values: 3.141592654 half_float: 3.140625 + half_float_no_doc_values: 3.140625 scaled_float: 3.14 token_count: count all these words please @@ -121,8 +133,11 @@ setup: byte: [16, 32, 64, 8, 4] byte_no_doc_values: [16, 8, 32, 4, 64] double: [3.141592653588, 2.141592653587] + double_no_doc_values: [3.141592653588, 2.141592653587] float: [1.123, 2.234] + float_no_doc_values: [2.234, 1.123] half_float: [1.123, 2.234] + half_float_no_doc_values: [2.234, 1.123] scaled_float: [-3.5, 2.5] @@ -1951,6 +1966,97 @@ setup: - match: { hits.hits.1.fields.field.0: 0 } - match: { hits.hits.2.fields.field.0: 7.283185307175 } +--- +"double_no_doc_values": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['double_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['double_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('double_no_doc_values').get(-1)" + - match: { hits.hits.0.fields.field.0: 3.14159265358979 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: 2.141592653587 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('double_no_doc_values', -1)" + - match: { hits.hits.0.fields.field.0: 3.14159265358979 } + - match: { hits.hits.1.fields.field.0: -1 } + - match: { hits.hits.2.fields.field.0: 2.141592653587 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "double defaultDouble = 7.8; field('double_no_doc_values').get(1, defaultDouble)" + - match: { hits.hits.0.fields.field.0: 7.8 } + - match: { hits.hits.1.fields.field.0: 7.8 } + - match: { hits.hits.2.fields.field.0: 3.141592653588 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('double_no_doc_values').get(1, 9.2)" + - match: { hits.hits.0.fields.field.0: 9.2 } + - match: { hits.hits.1.fields.field.0: 9.2 } + - match: { hits.hits.2.fields.field.0: 3.141592653588 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "double total = 0; for (double d : field('double_no_doc_values')) { total += d; } total + field('double_no_doc_values').size();" + - match: { hits.hits.0.fields.field.0: 4.14159265358979 } + - match: { hits.hits.1.fields.field.0: 0 } + - match: { hits.hits.2.fields.field.0: 7.283185307175 } + --- "float": - do: @@ -2040,6 +2146,97 @@ setup: - match: { hits.hits.1.fields.field.0: "0.0" } - match: { hits.hits.2.fields.field.0: "5.357" } +--- +"float_no_doc_values": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['float_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + 
rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['float_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('float_no_doc_values').get(-1).toString()" # toString to avoid making this a double + - match: { hits.hits.0.fields.field.0: "3.1415927" } + - match: { hits.hits.1.fields.field.0: "-1.0" } + - match: { hits.hits.2.fields.field.0: "1.123" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "return $('float_no_doc_values', -1).toString()" # toString to avoid making this a double + - match: { hits.hits.0.fields.field.0: "3.1415927" } + - match: { hits.hits.1.fields.field.0: "-1.0" } + - match: { hits.hits.2.fields.field.0: "1.123" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "float defaultFloat = 7.8f; field('float_no_doc_values').get(1, defaultFloat).toString()" + - match: { hits.hits.0.fields.field.0: "7.8" } + - match: { hits.hits.1.fields.field.0: "7.8" } + - match: { hits.hits.2.fields.field.0: "2.234" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('float_no_doc_values').get(1, 9.2f).toString()" + - match: { hits.hits.0.fields.field.0: "9.2" } + - match: { hits.hits.1.fields.field.0: "9.2" } + - match: { hits.hits.2.fields.field.0: "2.234" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "float total = 0; for (float f : field('float_no_doc_values')) { total += f; } Float.toString(total + field('float_no_doc_values').size());" + - match: { hits.hits.0.fields.field.0: "4.141593" } + - match: { hits.hits.1.fields.field.0: "0.0" } + - match: { hits.hits.2.fields.field.0: "5.357" } + --- "half_float": - skip: @@ -2132,6 +2329,100 @@ setup: - match: { hits.hits.1.fields.field.0: 0.0 } - close_to: { hits.hits.2.fields.field.0: { value: 2.234, error: 0.001 } } +--- +"half_float_no_doc_values": + - skip: + features: close_to + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['half_float_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['float_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('half_float_no_doc_values').get(0.0)" + - close_to: { hits.hits.0.fields.field.0: { value: 3.140625, error: 0.001 } } + - match: { hits.hits.1.fields.field.0: 0.0 } + - close_to: { hits.hits.2.fields.field.0: { value: 1.123, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid stash */ $('half_float_no_doc_values', 0.0)" + - close_to: { hits.hits.0.fields.field.0: { 
value: 3.140625, error: 0.001 } } + - match: { hits.hits.1.fields.field.0: 0.0 } + - close_to: { hits.hits.2.fields.field.0: { value: 1.123, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('half_float_no_doc_values').get(1, 0.0)" + - match: { hits.hits.0.fields.field.0: 0.0 } + - match: { hits.hits.1.fields.field.0: 0.0 } + - close_to: { hits.hits.2.fields.field.0: { value: 2.234, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('half_float_no_doc_values').asDouble(0.0)" + - close_to: { hits.hits.0.fields.field.0: { value: 3.140625, error: 0.001 } } + - match: { hits.hits.1.fields.field.0: 0.0 } + - close_to: { hits.hits.2.fields.field.0: { value: 1.123, error: 0.0001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('half_float_no_doc_values').asDouble(1, 0.0)" + - match: { hits.hits.0.fields.field.0: 0.0 } + - match: { hits.hits.1.fields.field.0: 0.0 } + - close_to: { hits.hits.2.fields.field.0: { value: 2.234, error: 0.001 } } + --- "scaled_float": - do: diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java index 9c54be22a4f27..5ee4f860e19f1 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java @@ -103,8 +103,12 @@ public boolean advanceExact(int doc) throws IOException { values = new TreeSet<>(); for (Object value : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { + assert value instanceof Map && ((Map) value).get("coordinates") instanceof List; List coordinates = ((Map>) value).get("coordinates"); - values.add(new GeoPoint((double) coordinates.get(1), (double) coordinates.get(0)).getEncoded()); + assert coordinates.size() == 2 && coordinates.get(1) instanceof Number && coordinates.get(0) instanceof Number; + values.add( + new GeoPoint(((Number) coordinates.get(1)).doubleValue(), ((Number) coordinates.get(0)).doubleValue()).getEncoded() + ); } iterator = values.iterator(); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java new file mode 100644 index 0000000000000..dea18d6eae6d1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.fielddata; + +import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.index.mapper.ValueFetcher; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.script.field.DocValuesScriptFieldFactory; +import org.elasticsearch.script.field.ToScriptFieldFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; +import java.util.TreeSet; + +public class SourceValueFetcherSortedDoubleIndexFieldData extends SourceValueFetcherIndexFieldData { + + public static class Builder extends SourceValueFetcherIndexFieldData.Builder { + + public Builder( + String fieldName, + ValuesSourceType valuesSourceType, + ValueFetcher valueFetcher, + SourceLookup sourceLookup, + ToScriptFieldFactory toScriptFieldFactory + ) { + super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + } + + @Override + public SourceValueFetcherSortedDoubleIndexFieldData build(IndexFieldDataCache cache, CircuitBreakerService breakerService) { + return new SourceValueFetcherSortedDoubleIndexFieldData( + fieldName, + valuesSourceType, + valueFetcher, + sourceLookup, + toScriptFieldFactory + ); + } + } + + protected SourceValueFetcherSortedDoubleIndexFieldData( + String fieldName, + ValuesSourceType valuesSourceType, + ValueFetcher valueFetcher, + SourceLookup sourceLookup, + ToScriptFieldFactory toScriptFieldFactory + ) { + super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + } + + @Override + public SourceValueFetcherLeafFieldData loadDirect(LeafReaderContext context) throws Exception { + return new SourceValueFetcherSortedDoubleLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + } + + private static class SourceValueFetcherSortedDoubleLeafFieldData extends SourceValueFetcherLeafFieldData { + + private SourceValueFetcherSortedDoubleLeafFieldData( + ToScriptFieldFactory toScriptFieldFactory, + LeafReaderContext leafReaderContext, + ValueFetcher valueFetcher, + SourceLookup sourceLookup + ) { + super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + } + + @Override + public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { + return toScriptFieldFactory.getScriptFieldFactory( + new SourceValueFetcherSortedNumericDoubleValues(leafReaderContext, valueFetcher, sourceLookup), + name + ); + } + } + + private static class SourceValueFetcherSortedNumericDoubleValues extends SortedNumericDoubleValues implements ValueFetcherDocValues { + + private final LeafReaderContext leafReaderContext; + + private final ValueFetcher valueFetcher; + private final SourceLookup sourceLookup; + + private TreeSet values; + private Iterator iterator; + + private SourceValueFetcherSortedNumericDoubleValues( + LeafReaderContext leafReaderContext, + ValueFetcher valueFetcher, + SourceLookup sourceLookup + ) { + this.leafReaderContext = leafReaderContext; + this.valueFetcher = valueFetcher; + this.sourceLookup = sourceLookup; + } + + @Override + public boolean advanceExact(int doc) throws IOException { + sourceLookup.setSegmentAndDocument(leafReaderContext, doc); + values = new TreeSet<>(); + + for (Object value : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { + assert value instanceof Number; + values.add(((Number) value).doubleValue()); + } + + iterator = 
values.iterator(); + + return true; + } + + @Override + public int docValueCount() { + return values.size(); + } + + @Override + public double nextValue() throws IOException { + assert iterator.hasNext(); + return iterator.next(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java index 671c547155cdd..70b7db917e20a 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java @@ -109,6 +109,7 @@ public boolean advanceExact(int doc) throws IOException { values = new TreeSet<>(); for (Object value : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { + assert value instanceof Number; values.add(((Number) value).longValue()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 9890e08568784..1210c23880a64 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.SourceValueFetcherSortedDoubleIndexFieldData; import org.elasticsearch.index.fielddata.SourceValueFetcherSortedNumericIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedDoublesIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -372,6 +373,21 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedDoublesIndexFieldData.Builder(name, numericType(), HalfFloatDocValuesField::new); } + @Override + public IndexFieldData.Builder getValueFetcherFieldDataBuilder( + String name, + SourceLookup sourceLookup, + ValueFetcher valueFetcher + ) { + return new SourceValueFetcherSortedDoubleIndexFieldData.Builder( + name, + numericType().getValuesSourceType(), + valueFetcher, + sourceLookup, + HalfFloatDocValuesField::new + ); + } + private static void validateParsed(float value) { if (Float.isFinite(HalfFloatPoint.sortableShortToHalfFloat(HalfFloatPoint.halfFloatToSortableShort(value))) == false) { throw new IllegalArgumentException("[half_float] supports only finite values, but got [" + value + "]"); @@ -507,6 +523,21 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedDoublesIndexFieldData.Builder(name, numericType(), FloatDocValuesField::new); } + @Override + public IndexFieldData.Builder getValueFetcherFieldDataBuilder( + String name, + SourceLookup sourceLookup, + ValueFetcher valueFetcher + ) { + return new SourceValueFetcherSortedDoubleIndexFieldData.Builder( + name, + numericType().getValuesSourceType(), + valueFetcher, + sourceLookup, + FloatDocValuesField::new + ); + } + private static void validateParsed(float value) { if (Float.isFinite(value) == false) { throw new IllegalArgumentException("[float] supports only finite values, but got [" + value + "]"); @@ -620,6 +651,21 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new 
SortedDoublesIndexFieldData.Builder(name, numericType(), DoubleDocValuesField::new); } + @Override + public IndexFieldData.Builder getValueFetcherFieldDataBuilder( + String name, + SourceLookup sourceLookup, + ValueFetcher valueFetcher + ) { + return new SourceValueFetcherSortedDoubleIndexFieldData.Builder( + name, + numericType().getValuesSourceType(), + valueFetcher, + sourceLookup, + DoubleDocValuesField::new + ); + } + private static void validateParsed(double value) { if (Double.isFinite(value) == false) { throw new IllegalArgumentException("[double] supports only finite values, but got [" + value + "]"); From 88afbcdbb7806dcc0f97538ffb5a17edada2b9d9 Mon Sep 17 00:00:00 2001 From: Justin Cranford <89857999+justincr-elastic@users.noreply.github.com> Date: Tue, 2 Aug 2022 14:09:46 -0400 Subject: [PATCH 064/265] Add PKC JWKSet reloading to JWT authentication doc (#88692) --- .../authentication/jwt-realm.asciidoc | 94 ++++++++++++++----- 1 file changed, 71 insertions(+), 23 deletions(-) diff --git a/x-pack/docs/en/security/authentication/jwt-realm.asciidoc b/x-pack/docs/en/security/authentication/jwt-realm.asciidoc index 24edf2b2a9597..897047c0de974 100644 --- a/x-pack/docs/en/security/authentication/jwt-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/jwt-realm.asciidoc @@ -4,16 +4,16 @@ beta::[] -{es} can be configured to trust JSON Web Tokens (JWTs) that are issued as an -authentication credential from an external service. +{es} can be configured to trust JSON Web Tokens (JWTs) issued from an external service +as bearer tokens for authentication. When a JWT realm is used to authenticate with {es}, a distinction is made between the _client_ that is connecting to {es}, and the _user_ on whose behalf -the request should run. The JWT identifies the user, and a separate credential -is used to authenticate the client. +the request should run. The JWT authenticates the user, and a separate credential +authenticates the client. -A common scenario that uses JWTs is when an existing front-end application uses -OpenID Connect (OIDC) as an authentication method, and then accesses {es} +A common scenario for JWTs is when an existing front-end application uses +OpenID Connect (OIDC) to authenticate and identify a user, and then accesses {es} on behalf of the authenticated user. TIP: If the front-end application does not exist, you can use the @@ -21,22 +21,27 @@ TIP: If the front-end application does not exist, you can use the [[jwt-realm-oidc]] ==== JWT uses OIDC workflows -JWT authentication in {es} is derived from OIDC workflows, where different +JWT authentication in {es} is derived from OIDC user workflows, where different tokens can be issued by an OIDC Provider (OP). One possible token is an _ID token_, which uses the JWT format. If the ID token is presented to a JWT -realm, {es} can use it to authenticate, identify, and authorize an individual +realm, {es} can use it as a bearer token to authenticate, identify, and authorize an individual user. -NOTE: Because JWTs are external to {es}, you can define a custom workflow +NOTE: Because JWTs are obtained external to {es}, you can define a custom workflow instead of using the OIDC workflow. However, the JWT format must still be JSON Web Signature (JWS). The JWS header and JWS signature are validated using OIDC ID token validation rules. 
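For example, a client application could present such a token to {es} as a bearer credential; the token and shared secret values below are placeholders:

[source,sh]
----
curl -s -X GET -H "Authorization: Bearer <jwt>" \
-H "ES-Client-Authentication: SharedSecret <client-shared-secret>" \
https://localhost:9200/_security/_authenticate
----

The `ES-Client-Authentication` header carries the realm's shared secret when `client_authentication.type` is set to `shared_secret`.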
{es} supports a separate <>, which provides -stronger security guarantees than the JWT realm and is preferred for any +stronger security guarantees than the JWT realm, and is preferred for any use case where {es} can act as an OIDC RP. The OIDC realm is the only supported way to enable OIDC authentication in {kib}. +TIP: If JWTs are issued for the front-end application, the application is the realm client and JWT user. +That is not supported by OIDC flows, but it may be supported by bespoke JWT issuers. +In that case, use the client secret and JWT for the client application, and the +`es-security-runas-user` HTTP request header for the different user. See <>. + [[jwt-realm-configuration]] ==== Configure {es} to use a JWT realm @@ -124,7 +129,7 @@ The file can be removed after you load the contents into the {es} keystore. [NOTE] ==== Using the JWKS is preferred. However, you can add an HMAC key in string format -using the following command. This format is compatible with OIDC HMAC keys, but +using the following command. This format is compatible with HMAC UTF-8 keys, but only supports a single key with no attributes. You can only use one HMAC format (either `hmac_jwkset` or `hmac_key`) simultaneously. @@ -196,6 +201,10 @@ NOTE: You can relax validation of any of the time-based claims by setting validating JWTs with respect to their authentication time (`auth_time`), creation (`iat`), not before (`nbf`), and expiration times (`exp`). +`iss`:: +(Required, String) Denotes the issuer that created the ID token. The value must +be an exact, case-sensitive match to the value in the `allowed_issuer` setting. + `aud`:: (Required, String) Indicates the audiences that the ID token is for, expressed as a comma-separated value (CSV). One of the values must be an exact, case-sensitive @@ -209,10 +218,6 @@ milliseconds since epoch. (Required, integer) Time that the ID token was issued, expressed in UTC milliseconds since epoch. -`iss`:: -(Required, String) Denotes the issuer that created the ID token. The value must -be an exact, case-sensitive match to the value in the `allowed_issuer` setting. - `nbf`:: (Optional, integer) Indicates the time before which the JWT must not be accepted, expressed as UTC milliseconds since epoch. @@ -259,7 +264,7 @@ setting `claims.dn_pattern` to extract a substring value. ==== JWT realm authorization The JWT realm supports authorization with the create or update role mappings API, or delegating authorization to another realm. You cannot use these methods -simultaneously, so choose whichever works best for your environment. +simultaneously, so choose whichever works best for your environment. IMPORTANT: You cannot map roles in the JWT realm using the `role_mapping.yml` file. @@ -352,7 +357,7 @@ linked to realm `native1`. [[jwt-realm-runas]] ===== Applying the `run_as` privilege to JWT realm users -{es} can retrieve roles for a JWT user through either role mapping or +{es} can retrieve roles for a JWT user through either role mapping or delegated authorization. Regardless of which option you choose, you can apply the <> to a role so that a user can submit authenticated requests to "run as" a different user. 
To submit requests as @@ -415,7 +420,7 @@ the `jwt_role1` role that you mapped to this user in the JWT realm: "metadata":{"jwt_claim_email":"user2@something.example.com","jwt_claim_aud":["es01","es02","es03"], "jwt_claim_sub":"user2","jwt_claim_iss":"my-issuer"},"enabled":true,"authentication_realm": {"name":"jwt2","type":"jwt"},"lookup_realm":{"name":"jwt2","type":"jwt"},"authentication_type":"realm"} -% +% ---- If you want to specify a request as the `run_as` user, include the @@ -435,11 +440,54 @@ and {es} used the `jwt_role1` role: ---- {"username":"user123_runas","roles":["jwt_role1"],"full_name":null,"email":null,"metadata":{}, "enabled":true,"authentication_realm":{"name":"jwt2","type":"jwt"},"lookup_realm":{"name":"native", -"type":"native"},"authentication_type":"realm"}% +"type":"native"},"authentication_type":"realm"}% ---- +[[jwt-realm-jwkset-reloading]] +===== PKC JWKS reloading +JWT authentication supports signature verification using PKC (Public Key Cryptography) +or HMAC algorithms. + +PKC JSON Web Token Key Sets (JWKS) can contain public RSA and EC keys. HMAC JWKS +or an HMAC UTF-8 JWK contain secret keys. JWT issuers typically rotate PKC JWKS +more frequently (such as daily), because RSA and EC public keys are designed to +be easier to distribute than secret keys like HMAC. + +JWT realms load a PKC JWKS and an HMAC JWKS or HMAC UTF-8 JWK at startup. JWT +realms can also reload PKC JWKS contents at runtime; a reload is triggered by +signature validation failures. + +NOTE: HMAC JWKS or HMAC UTF-8 JWK reloading is not supported at this time. + +Load failures, parse errors, and configuration errors prevent a node from +starting (and restarting). However, runtime PKC reload errors and recoveries are +handled gracefully. + +All other JWT realm validations are checked before a signature failure can +trigger a PKC JWKS reload. If multiple JWT authentication signature failures +occur simultaneously with a single {es} node, reloads are combined to reduce +the reloads that are sent externally. + +Separate reload requests cannot be combined if JWT signature failures trigger: + +* PKC JWKS reloads in different {es} nodes +* PKC JWKS reloads in the same {es} node at different times + +[IMPORTANT] +==== +Enabling client authentication (`client_authentication.type`) is strongly +recommended. Only trusted client applications and realm-specific JWT users can +trigger PKC reload attempts. Additionally, configuring the following +<> is recommended: + +* `allowed_audiences` +* `allowed_clock_skew` +* `allowed_issuer` +* `allowed_signature_algorithms` +==== + [[hmac-oidc-example]] -==== Authorizing to the JWT realm with an OIDC HMAC key +==== Authorizing to the JWT realm with an HMAC UTF-8 key The following settings are for a JWT issuer, {es}, and a client of {es}. The example HMAC key is in an OIDC format that's compatible with HMAC. The key bytes are the UTF-8 encoding of the UNICODE characters. @@ -456,7 +504,7 @@ The following values are for the bespoke JWT issuer. Issuer: iss8 Audiences: aud8 Algorithms: HS256 -HMAC OIDC: hmac-oidc-key-string-for-hs256-algorithm +HMAC UTF-8: hmac-oidc-key-string-for-hs256-algorithm ---- // NOTCONSOLE @@ -477,7 +525,7 @@ xpack.security.authc.realms.jwt.jwt8.client_authentication.type: shared_secret realm chain on {ecloud}. 
===== JWT realm secure settings -After defining the realm settings, use the +After defining the realm settings, use the {ref}/elasticsearch-keystore.html[`elasticsearch-keystore`] tool to add the following secure settings to the {es} keystore. In {ecloud}, you define settings for the {es} keystore under **Security** in your deployment. @@ -536,5 +584,5 @@ JWT realm itself. "metadata":{"jwt_claim_email":"user2@something.example.com","jwt_claim_aud":["es01","es02","es03"], "jwt_claim_sub":"user2","jwt_claim_iss":"my-issuer"},"enabled":true,"authentication_realm": {"name":"jwt2","type":"jwt"},"lookup_realm":{"name":"jwt2","type":"jwt"},"authentication_type":"realm"} -% +% ---- From d2f99f5baf0e3b0cc2c706c80a4f7bb69efe1649 Mon Sep 17 00:00:00 2001 From: Philip Krauss <35487337+philkra@users.noreply.github.com> Date: Tue, 2 Aug 2022 20:17:41 +0200 Subject: [PATCH 065/265] Change YAML test structure from list to object (#77700) This change converts the range query from an array to object. ``` range": { "number": [ { "gte": 4 } ] } ``` to ``` range": { "number": { "gte": 4 } } ``` --- .../rest-api-spec/test/data_stream/110_update_by_query.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml index 027b0b1f94050..ef39fe124cfae 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml @@ -75,7 +75,7 @@ query: range: number: - - gte: 4 + gte: 4 - match: {updated: 2} - match: {version_conflicts: 0} From 9e9f19bc9cae300f85973d0e5051aef42112e29c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 2 Aug 2022 14:47:43 -0700 Subject: [PATCH 066/265] Further attempt at capturing reaper error logs --- .../src/main/groovy/elasticsearch.build-complete.gradle | 3 ++- .../main/java/org/elasticsearch/gradle/reaper/Reaper.java | 6 +++--- .../main/java/org/elasticsearch/gradle/ReaperService.java | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle index 32967f03c6879..f8024f65cf04f 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle @@ -23,7 +23,6 @@ if (buildNumber && performanceTest == null) { fileset(dir: projectDir) { Set fileSet = fileTree(projectDir) { include("**/*.hprof") - include(".gradle/reaper/**") include("**/build/test-results/**/*.xml") include("**/build/testclusters/**") exclude("**/build/testclusters/**/data/**") @@ -49,6 +48,8 @@ if (buildNumber && performanceTest == null) { } fileset(dir: "${gradle.gradleUserHomeDir}/workers", followsymlinks: false) + + fileset(dir: "${project.projectDir}/.gradle/reaper", followsymlinks: false) } } catch (Exception e) { logger.lifecycle("Failed to archive additional logs", e) diff --git a/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java b/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java index f5a24eba36872..e6c5b61e0a76c 100644 --- a/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java +++ 
b/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java @@ -83,17 +83,17 @@ private void reap() { delete(inputFile); } } - } catch (Exception e) { + } catch (Throwable e) { + failed = true; logFailure("Failed to reap inputs", e); } } - private void logFailure(String message, Exception e) { + private void logFailure(String message, Throwable e) { System.err.println(message); if (e != null) { e.printStackTrace(System.err); } - failed = true; } private void delete(Path toDelete) { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java b/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java index ece27cef7b66f..d63efbe3e55cb 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java @@ -78,7 +78,7 @@ void shutdown() { logger.info("Waiting for reaper to exit normally"); if (reaperProcess.waitFor() != 0) { Path inputDir = getParameters().getInputDir().get().getAsFile().toPath(); - throw new GradleException("Reaper process failed. Check log at " + inputDir.resolve("error.log") + " for details"); + throw new GradleException("Reaper process failed. Check log at " + inputDir.resolve("reaper.log") + " for details"); } } catch (Exception e) { throw new RuntimeException(e); @@ -109,7 +109,7 @@ private synchronized void ensureReaperStarted() { builder.redirectInput(ProcessBuilder.Redirect.PIPE); File logFile = logFilePath().toFile(); builder.redirectOutput(logFile); - builder.redirectError(logFile); + builder.redirectErrorStream(); reaperProcess = builder.start(); } catch (Exception e) { throw new RuntimeException(e); From d3e057c33a8e3485be8e1b8eb009ac98f85cd092 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 3 Aug 2022 07:57:40 +0200 Subject: [PATCH 067/265] [Transform] improve error handling in state persistence (#88910) transform persists the internal state of a transform (e.g. the data cursor) in state document. This change improves the error handling and fixes the problem described in #88905. A transform can now recover from this problem. 
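In practical terms, state persistence failures are now tracked with their own counter (separate from the search/index failure counter) and compared against the transform's `num_failure_retries` setting, falling back to the context default; a successful save resets the counter, and a version conflict additionally re-reads the stored `seq_no`/`primary_term` so that the next attempt can succeed. A minimal sketch of that retry accounting (field and method names are assumed to mirror `ClientTransformIndexer`; this is an illustration rather than the exact production code):

```
// Sketch only: how persistence failures are counted against num_failure_retries.
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;

class StatePersistenceRetryPolicy {
    private final AtomicInteger statePersistenceFailures = new AtomicInteger();
    private final Integer configuredRetries; // transform-level setting, may be null
    private final int defaultRetries;        // default taken from the transform context

    StatePersistenceRetryPolicy(Integer configuredRetries, int defaultRetries) {
        this.configuredRetries = configuredRetries;
        this.defaultRetries = defaultRetries;
    }

    void onPersistenceSuccess() {
        statePersistenceFailures.set(0); // a successful save resets the counter
    }

    /** Returns true when the indexer should be failed instead of retried. */
    boolean onPersistenceFailure() {
        int numFailureRetries = Optional.ofNullable(configuredRetries).orElse(defaultRetries);
        return numFailureRetries != -1 && statePersistenceFailures.incrementAndGet() > numFailureRetries;
    }
}
```

The new `TransformIndexerFailureOnStatePersistenceTests` exercises this behaviour with `num_failure_retries: 2`, checking that a third consecutive persistence failure fails the task and that a successful save in between resets the count.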
fixes #88905 --- docs/changelog/88910.yaml | 6 + .../transforms/TransformConfigTests.java | 29 + .../transforms/ClientTransformIndexer.java | 54 +- ...IndexerFailureOnStatePersistenceTests.java | 559 ++++++++++++++++++ 4 files changed, 642 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/88910.yaml create mode 100644 x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java diff --git a/docs/changelog/88910.yaml b/docs/changelog/88910.yaml new file mode 100644 index 0000000000000..581ae9d6e8d1c --- /dev/null +++ b/docs/changelog/88910.yaml @@ -0,0 +1,6 @@ +pr: 88910 +summary: Improve error handling in state persistence +area: Transform +type: bug +issues: + - 88905 diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java index 146255fec1796..17b4a97a4b818 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java @@ -122,6 +122,35 @@ public static TransformConfig randomTransformConfig(String id, Version version) return randomTransformConfig(id, version, pivotConfig, latestConfig); } + public static TransformConfig randomTransformConfigWithSettings(SettingsConfig settingsConfig) { + PivotConfig pivotConfig; + LatestConfig latestConfig; + if (randomBoolean()) { + pivotConfig = PivotConfigTests.randomPivotConfig(); + latestConfig = null; + } else { + pivotConfig = null; + latestConfig = LatestConfigTests.randomLatestConfig(); + } + + return new TransformConfig( + randomAlphaOfLengthBetween(1, 10), + randomSourceConfig(), + randomDestConfig(), + randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), + randomBoolean() ? null : randomSyncConfig(), + randomHeaders(), + pivotConfig, + latestConfig, + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), + settingsConfig, + randomBoolean() ? null : randomMetadata(), + randomBoolean() ? null : randomRetentionPolicyConfig(), + randomBoolean() ? 
null : Instant.now(), + null + ); + } + public static TransformConfig randomTransformConfig(String id, Version version, PivotConfig pivotConfig, LatestConfig latestConfig) { return new TransformConfig( id, diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 3a81e2a413701..6450e97658b53 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -61,8 +61,10 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; @@ -76,6 +78,9 @@ class ClientTransformIndexer extends TransformIndexer { private final AtomicBoolean oldStatsCleanedUp = new AtomicBoolean(false); private final AtomicReference seqNoPrimaryTermAndIndexHolder; + + // protected for unit tests + protected final AtomicInteger statePersistenceFailures = new AtomicInteger(); private final ConcurrentHashMap namedPits = new ConcurrentHashMap<>(); private volatile long pitCheckpoint; private volatile boolean disablePit = false; @@ -287,6 +292,7 @@ protected void persistState(TransformState state, ActionListener listener) seqNoPrimaryTermAndIndex, ActionListener.wrap(r -> { updateSeqNoPrimaryTermAndIndex(seqNoPrimaryTermAndIndex, r); + statePersistenceFailures.set(0); // Only do this clean up once, if it succeeded, no reason to do the query again. if (oldStatsCleanedUp.compareAndSet(false, true)) { @@ -311,10 +317,12 @@ protected void persistState(TransformState state, ActionListener listener) if (org.elasticsearch.ExceptionsHelper.unwrapCause(statsExc) instanceof VersionConflictEngineException) { // this should never happen, but indicates a race condition in state persistence: // - there should be only 1 save persistence at a time - // - this is not a catastrophic failure, if 2 state persistence calls run at the same time, 1 should succeed and update - // seqNoPrimaryTermAndIndex - // - for tests fail(assert), so we can debug the problem - logger.error( + // - there are reasons the seq_id, primary_term changes without user intervention, e.g. 
an internal + // retry (seq_id) or an unexpected node failure (primary_term), these are rare + // - in case re-get the versions and retry on the next persistence + // - the transform can (extremely unlikely) fail if state persistence fails in a row + // - for tests the number of allowed retries is set to 0 and therefore causes the transform to fail + logger.warn( () -> format( "[%s] updating stats of transform failed, unexpected version conflict of internal state, resetting to recover.", transformConfig.getId() @@ -326,16 +334,50 @@ protected void persistState(TransformState state, ActionListener listener) "Failure updating stats of transform, unexpected version conflict of internal state, resetting to recover: " + statsExc.getMessage() ); - assert false : "[" + getJobId() + "] updating stats of transform failed, unexpected version conflict of internal state"; + + if (handleStatePersistenceFailure(statsExc) == false) { + // get the current seqNo and primary term, however ignore the stored state + transformsConfigManager.getTransformStoredDoc( + transformConfig.getId(), + false, + ActionListener.wrap(storedDocAndSeqNoPrimaryTerm -> { + updateSeqNoPrimaryTermAndIndex(seqNoPrimaryTermAndIndex, storedDocAndSeqNoPrimaryTerm.v2()); + listener.onFailure(statsExc); + }, e2 -> listener.onFailure(statsExc)) + ); + // wrapped listener gets called + return; + } } else { - logger.error(() -> "[" + transformConfig.getId() + "] updating stats of transform failed.", statsExc); + logger.warn(() -> "[" + transformConfig.getId() + "] updating stats of transform failed.", statsExc); auditor.warning(getJobId(), "Failure updating stats of transform: " + statsExc.getMessage()); + handleStatePersistenceFailure(statsExc); } listener.onFailure(statsExc); }) ); } + private boolean handleStatePersistenceFailure(Exception statsExc) { + // we use the same setting for retries, however a separate counter, because the failure + // counter for search/index gets reset after a successful bulk index request + int numFailureRetries = Optional.ofNullable(transformConfig.getSettings().getNumFailureRetries()) + .orElse(context.getNumFailureRetries()); + + final int failureCount = statePersistenceFailures.incrementAndGet(); + + if (numFailureRetries != -1 && failureCount > numFailureRetries) { + failIndexer( + "task encountered more than " + + numFailureRetries + + " failures updating internal state; latest failure: " + + statsExc.getMessage() + ); + return true; + } + return false; + } + void updateSeqNoPrimaryTermAndIndex(SeqNoPrimaryTermAndIndex expectedValue, SeqNoPrimaryTermAndIndex newValue) { logger.debug(() -> format("[%s] Updated state document from [%s] to [%s]", transformConfig.getId(), expectedValue, newValue)); boolean updated = seqNoPrimaryTermAndIndexHolder.compareAndSet(expectedValue, newValue); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java new file mode 100644 index 0000000000000..82141503795fb --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java @@ -0,0 +1,559 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform.transforms; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; +import org.elasticsearch.xpack.transform.TransformServices; +import org.elasticsearch.xpack.transform.checkpoint.CheckpointProvider; +import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.InMemoryTransformConfigManager; +import org.elasticsearch.xpack.transform.persistence.SeqNoPrimaryTermAndIndex; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.transforms.scheduling.TransformScheduler; + +import java.time.Clock; +import java.time.Instant; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isA; +import static org.mockito.Mockito.mock; + +public class TransformIndexerFailureOnStatePersistenceTests extends ESTestCase { + + private static class MockClientTransformIndexer extends ClientTransformIndexer { + + MockClientTransformIndexer( + ThreadPool threadPool, + TransformServices transformServices, + CheckpointProvider checkpointProvider, + AtomicReference initialState, + TransformIndexerPosition initialPosition, + Client client, + TransformIndexerStats initialStats, + TransformConfig transformConfig, + TransformProgress transformProgress, + TransformCheckpoint lastCheckpoint, + TransformCheckpoint nextCheckpoint, + SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, + TransformContext context, + boolean shouldStopAtCheckpoint + ) { + super( + threadPool, + transformServices, + checkpointProvider, + 
initialState, + initialPosition, + client, + initialStats, + transformConfig, + transformProgress, + lastCheckpoint, + nextCheckpoint, + seqNoPrimaryTermAndIndex, + context, + shouldStopAtCheckpoint + ); + } + + protected boolean triggerSaveState() { + // persist every iteration for testing + return true; + } + + public int getStatePersistenceFailures() { + return statePersistenceFailures.get(); + } + } + + private static class FailingToPutStoredDocTransformConfigManager extends InMemoryTransformConfigManager { + + private final Set failAt; + private final Exception exception; + private int persistenceCallCount = 0; + + FailingToPutStoredDocTransformConfigManager(Set failAt, Exception exception) { + this.failAt = failAt; + this.exception = exception; + } + + @Override + public void putOrUpdateTransformStoredDoc( + TransformStoredDoc storedDoc, + SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, + ActionListener listener + ) { + if (failAt.contains(persistenceCallCount++)) { + listener.onFailure(exception); + } else { + super.putOrUpdateTransformStoredDoc(storedDoc, seqNoPrimaryTermAndIndex, listener); + } + } + } + + private static class SeqNoCheckingTransformConfigManager extends InMemoryTransformConfigManager { + + private long seqNo = -1; + private long primaryTerm = 0; + + SeqNoCheckingTransformConfigManager() {} + + @Override + public void putOrUpdateTransformStoredDoc( + TransformStoredDoc storedDoc, + SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, + ActionListener listener + ) { + if (seqNo != -1) { + if (seqNoPrimaryTermAndIndex.getSeqNo() != seqNo || seqNoPrimaryTermAndIndex.getPrimaryTerm() != primaryTerm) { + listener.onFailure( + new VersionConflictEngineException(new ShardId("index", "indexUUID", 42), "some_id", 45L, 44L, 43L, 42L) + ); + return; + } + } + + super.putOrUpdateTransformStoredDoc(storedDoc, seqNoPrimaryTermAndIndex, ActionListener.wrap(r -> { + // always inc seqNo, primaryTerm at random + if (randomBoolean()) { + primaryTerm++; + } + listener.onResponse(new SeqNoPrimaryTermAndIndex(++seqNo, primaryTerm, CURRENT_INDEX)); + }, listener::onFailure)); + } + + @Override + public void getTransformStoredDoc( + String transformId, + boolean allowNoMatch, + ActionListener> resultListener + ) { + super.getTransformStoredDoc( + transformId, + allowNoMatch, + ActionListener.wrap( + r -> resultListener.onResponse(new Tuple<>(r.v1(), new SeqNoPrimaryTermAndIndex(seqNo, primaryTerm, CURRENT_INDEX))), + resultListener::onFailure + ) + ); + } + } + + public void testStatePersistenceErrorHandling() throws InterruptedException { + TransformConfig config = TransformConfigTests.randomTransformConfigWithSettings( + new SettingsConfig( + randomBoolean() ? null : randomIntBetween(10, 10_000), + randomBoolean() ? null : randomFloat(), + randomBoolean(), + randomBoolean(), + randomBoolean(), + randomBoolean(), + 2 + ) + ); + AtomicReference state = new AtomicReference<>(TransformTaskState.STARTED); + TransformContext.Listener contextListener = new TransformContext.Listener() { + @Override + public void shutdown() {} + + @Override + public void failureCountChanged() {} + + @Override + public void fail(String failureMessage, ActionListener listener) { + state.set(TransformTaskState.FAILED); + } + }; + + { + TransformContext context = new TransformContext(state.get(), null, 0, contextListener); + Exception exceptionToThrow = randomBoolean() + ? 
new VersionConflictEngineException(new ShardId("index", "indexUUID", 42), "some_id", 45L, 44L, 43L, 42L) + : new ElasticsearchTimeoutException("timeout"); + TransformConfigManager configManager = new FailingToPutStoredDocTransformConfigManager(Set.of(0, 1, 2, 3), exceptionToThrow); + try (Client client = new NoOpClient(getTestName())) { + + MockClientTransformIndexer indexer = new MockClientTransformIndexer( + mock(ThreadPool.class), + new TransformServices( + configManager, + mock(TransformCheckpointService.class), + mock(TransformAuditor.class), + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + ), + mock(CheckpointProvider.class), + new AtomicReference<>(IndexerState.STOPPED), + null, + client, + mock(TransformIndexerStats.class), + config, + null, + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 0L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 2L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new SeqNoPrimaryTermAndIndex(1, 1, TransformInternalIndexConstants.LATEST_INDEX_NAME), + context, + false + ); + + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(exceptionToThrow.getClass())); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(1)); + } + ); + + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(exceptionToThrow.getClass())); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(2)); + } + ); + + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(exceptionToThrow.getClass())); + assertThat(state.get(), equalTo(TransformTaskState.FAILED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(3)); + } + ); + } + + } + + // test reset on success + { + state.set(TransformTaskState.STARTED); + TransformContext context = new TransformContext(state.get(), null, 0, contextListener); + Exception exceptionToThrow = randomBoolean() + ? 
new VersionConflictEngineException(new ShardId("index", "indexUUID", 42), "some_id", 45L, 44L, 43L, 42L) + : new ElasticsearchTimeoutException("timeout"); + TransformConfigManager configManager = new FailingToPutStoredDocTransformConfigManager(Set.of(0, 2, 3, 4), exceptionToThrow); + try (Client client = new NoOpClient(getTestName())) { + MockClientTransformIndexer indexer = new MockClientTransformIndexer( + mock(ThreadPool.class), + new TransformServices( + configManager, + mock(TransformCheckpointService.class), + mock(TransformAuditor.class), + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + ), + mock(CheckpointProvider.class), + new AtomicReference<>(IndexerState.STOPPED), + null, + client, + mock(TransformIndexerStats.class), + config, + null, + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 0L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 2L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new SeqNoPrimaryTermAndIndex(1, 1, TransformInternalIndexConstants.LATEST_INDEX_NAME), + context, + false + ); + + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(exceptionToThrow.getClass())); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(1)); + } + ); + + // succeed + this.assertAsync( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + r -> { + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(0)); + } + ); + + // fail again + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(exceptionToThrow.getClass())); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(1)); + } + ); + + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(exceptionToThrow.getClass())); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(2)); + } + ); + + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(exceptionToThrow.getClass())); + assertThat(state.get(), equalTo(TransformTaskState.FAILED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(3)); + } + ); + } + + } + } + + public void testStatePersistenceRecovery() throws InterruptedException { + TransformConfig config = TransformConfigTests.randomTransformConfigWithSettings( + new SettingsConfig( + randomBoolean() ? null : randomIntBetween(10, 10_000), + randomBoolean() ? 
null : randomFloat(), + randomBoolean(), + randomBoolean(), + randomBoolean(), + randomBoolean(), + 2 + ) + ); + AtomicReference state = new AtomicReference<>(TransformTaskState.STARTED); + TransformContext.Listener contextListener = new TransformContext.Listener() { + @Override + public void shutdown() {} + + @Override + public void failureCountChanged() {} + + @Override + public void fail(String failureMessage, ActionListener listener) { + state.set(TransformTaskState.FAILED); + } + }; + + TransformContext context = new TransformContext(state.get(), null, 0, contextListener); + TransformConfigManager configManager = new SeqNoCheckingTransformConfigManager(); + + try (Client client = new NoOpClient(getTestName())) { + MockClientTransformIndexer indexer = new MockClientTransformIndexer( + mock(ThreadPool.class), + new TransformServices( + configManager, + mock(TransformCheckpointService.class), + mock(TransformAuditor.class), + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + ), + mock(CheckpointProvider.class), + new AtomicReference<>(IndexerState.STOPPED), + null, + client, + mock(TransformIndexerStats.class), + config, + null, + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 0L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 2L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new SeqNoPrimaryTermAndIndex(1, 1, TransformInternalIndexConstants.LATEST_INDEX_NAME), + context, + false + ); + + // succeed + this.assertAsync( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + r -> { + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(0)); + } + ); + + // push a new state outside the indexer + this.assertAsync( + listener -> configManager.putOrUpdateTransformStoredDoc( + new TransformStoredDoc( + config.getId(), + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + indexer.getStats() + ), + indexer.getSeqNoPrimaryTermAndIndex(), + listener + ), + seqNoPrimaryTermAndIndex -> assertThat( + seqNoPrimaryTermAndIndex.getSeqNo(), + equalTo(indexer.getSeqNoPrimaryTermAndIndex().getSeqNo() + 1) + ) + ); + + // state persistence should fail with a version conflict + this.assertAsyncFailure( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + e -> { + assertThat(e, isA(VersionConflictEngineException.class)); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(1)); + } + ); + + // recovered + this.assertAsync( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + r -> { + assertThat(indexer.getSeqNoPrimaryTermAndIndex().getSeqNo(), equalTo(2L)); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(0)); + } + ); + + // succeed + this.assertAsync( + listener -> indexer.persistState( + new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 42, null, null, null, false), + listener + ), + r -> { + 
assertThat(indexer.getSeqNoPrimaryTermAndIndex().getSeqNo(), equalTo(3L)); + assertThat(state.get(), equalTo(TransformTaskState.STARTED)); + assertThat(indexer.getStatePersistenceFailures(), equalTo(0)); + } + ); + } + + } + + private void assertAsync(Consumer> function, Consumer furtherTests) throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean listenerCalled = new AtomicBoolean(false); + + LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(r -> { + assertTrue("listener called more than once", listenerCalled.compareAndSet(false, true)); + furtherTests.accept(r); + }, e -> { + assertTrue("listener called more than once", listenerCalled.compareAndSet(false, true)); + fail("got unexpected exception: " + e); + }), latch); + + function.accept(listener); + assertTrue("timed out after 5s", latch.await(5, TimeUnit.SECONDS)); + } + + private void assertAsyncFailure(Consumer> function, Consumer failureConsumer) + throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean listenerCalled = new AtomicBoolean(false); + + LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(r -> { + assertTrue("listener called more than once", listenerCalled.compareAndSet(false, true)); + fail("got unexpected response: " + r); + }, e -> { + assertTrue("listener called more than once", listenerCalled.compareAndSet(false, true)); + failureConsumer.accept(e); + }), latch); + + function.accept(listener); + assertTrue("timed out after 5s", latch.await(5, TimeUnit.SECONDS)); + } +} From c7e10e70e16df8ac66f5f72995d6772faf5f1ce8 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Wed, 3 Aug 2022 09:01:44 +0300 Subject: [PATCH 068/265] Refactor WildcardExpressionResolver to better track usages of indices lookup (#89000) This is a pure refactoring of the WildcardExpressionResolver. The objective is to restrict access to the indices lookup through the context parameter only. Eventually, Security is going to plug into the context and only show a restricted view of the indices lookup, particular to the user context. 
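For illustration, the most visible API change (taken from the updated `WildcardExpressionResolverTests` in this patch) is that callers no longer pass the cluster `Metadata` alongside the context; the resolver obtains the indices lookup exclusively from the context:

```
// Old signature: callers handed the Metadata (and thus the raw indices lookup) to the resolver:
//   WildcardExpressionResolver.matches(context, state.getMetadata(), "foo*").keySet();
// New signature: the indices lookup is read from the context only, so a security-aware context
// can later expose a filtered view without changing call sites:
Set<String> matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches(context, "foo*").keySet();
```

The internal helpers (`matches`, `suffixWildcard`, `filterIndicesLookup`, `expand`) likewise now take only what they need (the lookup map plus the relevant flags) instead of the full `Metadata`.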
--- .../metadata/IndexNameExpressionResolver.java | 126 ++++++++---------- .../WildcardExpressionResolverTests.java | 37 +---- 2 files changed, 65 insertions(+), 98 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 674fd91ce00c6..4a96508ba8532 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -1125,19 +1125,18 @@ private WildcardExpressionResolver() { } public static List resolve(Context context, List expressions) { - IndicesOptions options = context.getOptions(); - Metadata metadata = context.getState().metadata(); // only check open/closed since if we do not expand to open or closed it doesn't make sense to // expand to hidden - if (options.expandWildcardsClosed() == false && options.expandWildcardsOpen() == false) { + if (context.getOptions().expandWildcardsClosed() == false && context.getOptions().expandWildcardsOpen() == false) { return expressions; } if (isEmptyOrTrivialWildcard(expressions)) { List resolvedExpressions = resolveEmptyOrTrivialWildcard(context); if (context.includeDataStreams()) { - final IndexMetadata.State excludeState = excludeState(options); - final Map dataStreamsAbstractions = metadata.getIndicesLookup() + final Map dataStreamsAbstractions = context.getState() + .metadata() + .getIndicesLookup() .entrySet() .stream() .filter(entry -> entry.getValue().getType() == IndexAbstraction.Type.DATA_STREAM) @@ -1145,25 +1144,19 @@ public static List resolve(Context context, List expressions) { // dedup backing indices if expand hidden indices option is true Set resolvedIncludingDataStreams = new HashSet<>(resolvedExpressions); resolvedIncludingDataStreams.addAll( - expand( - context, - excludeState, - dataStreamsAbstractions, - expressions.isEmpty() ? "_all" : expressions.get(0), - options.expandWildcardsHidden() - ) + expand(context, dataStreamsAbstractions, expressions.isEmpty() ? 
"_all" : expressions.get(0)) ); return new ArrayList<>(resolvedIncludingDataStreams); } return resolvedExpressions; } - Set result = innerResolve(context, expressions, options, metadata); + Set result = innerResolve(context, expressions); if (result == null) { return expressions; } - if (result.isEmpty() && options.allowNoIndices() == false) { + if (result.isEmpty() && context.getOptions().allowNoIndices() == false) { IndexNotFoundException infe = new IndexNotFoundException((String) null); infe.setResources("index_or_alias", expressions.toArray(new String[0])); throw infe; @@ -1171,16 +1164,13 @@ public static List resolve(Context context, List expressions) { return new ArrayList<>(result); } - private static Set innerResolve(Context context, List expressions, IndicesOptions options, Metadata metadata) { + private static Set innerResolve(Context context, List expressions) { Set result = null; boolean wildcardSeen = false; for (int i = 0; i < expressions.size(); i++) { String expression = expressions.get(i); - if (Strings.isEmpty(expression)) { - throw indexNotFoundException(expression); - } validateAliasOrIndex(expression); - if (aliasOrIndexExists(context, options, metadata, expression)) { + if (aliasOrIndexExists(context, expression, false)) { if (result != null) { result.add(expression); } @@ -1199,15 +1189,8 @@ private static Set innerResolve(Context context, List expression } if (Regex.isSimpleMatchPattern(expression) == false) { // TODO why does wildcard resolver throw exceptions regarding non wildcarded expressions? This should not be done here. - if (options.ignoreUnavailable() == false) { - IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(expression); - if (indexAbstraction == null) { - throw indexNotFoundException(expression); - } else if (indexAbstraction.getType() == IndexAbstraction.Type.ALIAS && options.ignoreAliases()) { - throw aliasesNotSupportedException(expression); - } else if (indexAbstraction.isDataStreamRelated() && context.includeDataStreams() == false) { - throw indexNotFoundException(expression); - } + if (context.getOptions().ignoreUnavailable() == false) { + aliasOrIndexExists(context, expression, true); } if (add) { result.add(expression); @@ -1216,26 +1199,26 @@ private static Set innerResolve(Context context, List expression } continue; } + wildcardSeen = true; - final IndexMetadata.State excludeState = excludeState(options); - final Map matches = matches(context, metadata, expression); - Set expand = expand(context, excludeState, matches, expression, options.expandWildcardsHidden()); + final Map matches = matches(context, expression); + if (context.getOptions().allowNoIndices() == false && matches.isEmpty()) { + throw indexNotFoundException(expression); + } + Set expand = expand(context, matches, expression); if (add) { result.addAll(expand); } else { result.removeAll(expand); } - if (options.allowNoIndices() == false && matches.isEmpty()) { - throw indexNotFoundException(expression); - } - if (Regex.isSimpleMatchPattern(expression)) { - wildcardSeen = true; - } } return result; } private static void validateAliasOrIndex(String expression) { + if (Strings.isEmpty(expression)) { + throw indexNotFoundException(expression); + } // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API // does not exist and the path is interpreted as an expression. 
If the expression begins with an underscore, // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown @@ -1245,18 +1228,28 @@ private static void validateAliasOrIndex(String expression) { } } - private static boolean aliasOrIndexExists(Context context, IndicesOptions options, Metadata metadata, String expression) { - IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(expression); + private static boolean aliasOrIndexExists(Context context, String expression, boolean throwExceptionIfAbsent) { + final IndicesOptions options = context.getOptions(); + IndexAbstraction indexAbstraction = context.getState().getMetadata().getIndicesLookup().get(expression); if (indexAbstraction == null) { + if (throwExceptionIfAbsent) { + throw indexNotFoundException(expression); + } return false; } // treat aliases as unavailable indices when ignoreAliases is set to true (e.g. delete index and update aliases api) if (indexAbstraction.getType() == IndexAbstraction.Type.ALIAS && options.ignoreAliases()) { + if (throwExceptionIfAbsent) { + throw aliasesNotSupportedException(expression); + } return false; } if (indexAbstraction.isDataStreamRelated() && context.includeDataStreams() == false) { + if (throwExceptionIfAbsent) { + throw indexNotFoundException(expression); + } return false; } @@ -1284,45 +1277,46 @@ private static IndexMetadata.State excludeState(IndicesOptions options) { return excludeState; } - public static Map matches(Context context, Metadata metadata, String expression) { + public static Map matches(Context context, String expression) { + SortedMap indicesLookup = context.getState().getMetadata().getIndicesLookup(); if (Regex.isMatchAllPattern(expression)) { - return filterIndicesLookup(context, metadata.getIndicesLookup(), null, context.getOptions()); + return filterIndicesLookup(indicesLookup, null, context.getOptions().ignoreAliases(), context.includeDataStreams()); } else if (expression.indexOf("*") == expression.length() - 1) { - return suffixWildcard(context, metadata, expression); + return suffixWildcard(indicesLookup, expression, context.getOptions().ignoreAliases(), context.includeDataStreams()); } else { - return otherWildcard(context, metadata, expression); + return filterIndicesLookup( + indicesLookup, + e -> Regex.simpleMatch(expression, e.getKey()), + context.getOptions().ignoreAliases(), + context.includeDataStreams() + ); } } - private static Map suffixWildcard(Context context, Metadata metadata, String expression) { + private static Map suffixWildcard( + SortedMap indicesLookup, + String expression, + boolean ignoreAliases, + boolean includeDataStreams + ) { assert expression.length() >= 2 : "expression [" + expression + "] should have at least a length of 2"; String fromPrefix = expression.substring(0, expression.length() - 1); char[] toPrefixCharArr = fromPrefix.toCharArray(); toPrefixCharArr[toPrefixCharArr.length - 1]++; String toPrefix = new String(toPrefixCharArr); - SortedMap subMap = metadata.getIndicesLookup().subMap(fromPrefix, toPrefix); - return filterIndicesLookup(context, subMap, null, context.getOptions()); - } - - private static Map otherWildcard(Context context, Metadata metadata, String expression) { - final String pattern = expression; - return filterIndicesLookup( - context, - metadata.getIndicesLookup(), - e -> Regex.simpleMatch(pattern, e.getKey()), - context.getOptions() - ); + SortedMap subMap = indicesLookup.subMap(fromPrefix, toPrefix); + return filterIndicesLookup(subMap, null, 
ignoreAliases, includeDataStreams); } private static Map filterIndicesLookup( - Context context, - SortedMap indicesLookup, + Map indicesLookup, Predicate> filter, - IndicesOptions options + boolean ignoreAliases, + boolean includeDataStreams ) { boolean shouldConsumeStream = false; Stream> stream = indicesLookup.entrySet().stream(); - if (options.ignoreAliases()) { + if (ignoreAliases) { shouldConsumeStream = true; stream = stream.filter(e -> e.getValue().getType() != IndexAbstraction.Type.ALIAS); } @@ -1330,7 +1324,7 @@ private static Map filterIndicesLookup( shouldConsumeStream = true; stream = stream.filter(filter); } - if (context.includeDataStreams() == false) { + if (includeDataStreams == false) { shouldConsumeStream = true; stream = stream.filter(e -> e.getValue().isDataStreamRelated() == false); } @@ -1341,13 +1335,9 @@ private static Map filterIndicesLookup( } } - private static Set expand( - Context context, - IndexMetadata.State excludeState, - Map matches, - String expression, - boolean includeHidden - ) { + private static Set expand(Context context, Map matches, String expression) { + final IndexMetadata.State excludeState = excludeState(context.getOptions()); + final boolean includeHidden = context.getOptions().expandWildcardsHidden(); Set expand = new HashSet<>(); for (Map.Entry entry : matches.entrySet()) { String aliasOrIndexName = entry.getKey(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index 0959b9bcd436b..4af4feed49cb0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -539,51 +539,28 @@ public void testMatchesConcreteIndicesWildcardAndAliases() { ); { - Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches( - indicesAndAliasesContext, - state.getMetadata(), - "*" - ).keySet(); + Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches(indicesAndAliasesContext, "*").keySet(); assertEquals(newHashSet("bar_bar", "foo_foo", "foo_index", "bar_index", "foo_alias"), matches); } { - Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches( - onlyIndicesContext, - state.getMetadata(), - "*" - ).keySet(); + Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches(onlyIndicesContext, "*").keySet(); assertEquals(newHashSet("bar_bar", "foo_foo", "foo_index", "bar_index"), matches); } { - Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches( - indicesAndAliasesContext, - state.getMetadata(), - "foo*" - ).keySet(); + Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches(indicesAndAliasesContext, "foo*").keySet(); assertEquals(newHashSet("foo_foo", "foo_index", "foo_alias"), matches); } { - Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches( - onlyIndicesContext, - state.getMetadata(), - "foo*" - ).keySet(); + Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches(onlyIndicesContext, "foo*").keySet(); assertEquals(newHashSet("foo_foo", "foo_index"), matches); } { - Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches( - indicesAndAliasesContext, - state.getMetadata(), - "foo_alias" - ).keySet(); + Set matches = 
IndexNameExpressionResolver.WildcardExpressionResolver.matches(indicesAndAliasesContext, "foo_alias") + .keySet(); assertEquals(newHashSet("foo_alias"), matches); } { - Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches( - onlyIndicesContext, - state.getMetadata(), - "foo_alias" - ).keySet(); + Set matches = IndexNameExpressionResolver.WildcardExpressionResolver.matches(onlyIndicesContext, "foo_alias").keySet(); assertEquals(newHashSet(), matches); } } From d828c2a6424c016103357f9be2a647769164ad06 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Wed, 3 Aug 2022 11:10:26 +0300 Subject: [PATCH 069/265] Health API - Monitoring local disk health (#88390) This PR introduces the local health monitoring functionality needed for #84811 . The monitor uses the `NodeService` to get the disk usage stats and determines the node's disk health. When a change in the disk's is detected or when the health node changes, this class would be responsible to send the node's health to the health node. Currently this is simulated with a method that just logs the current health. The monitor keeps the last reported health, this way, if something fails on the next check it will try to resend the new health state. --- server/src/main/java/module-info.java | 1 + .../common/settings/ClusterSettings.java | 4 +- .../health/metadata/HealthMetadata.java | 39 ++- .../health/node/DiskHealthInfo.java | 27 ++ .../health/node/LocalHealthMonitor.java | 205 ++++++++++++ .../java/org/elasticsearch/node/Node.java | 5 + .../health/metadata/HealthMetadataTests.java | 52 +++ .../health/node/LocalHealthMonitorTests.java | 311 ++++++++++++++++++ 8 files changed, 633 insertions(+), 11 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java create mode 100644 server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java create mode 100644 server/src/test/java/org/elasticsearch/health/metadata/HealthMetadataTests.java create mode 100644 server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 137d61f0b4385..81f530a4e2c1c 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -213,6 +213,7 @@ exports org.elasticsearch.env; exports org.elasticsearch.gateway; exports org.elasticsearch.health; + exports org.elasticsearch.health.node; exports org.elasticsearch.health.node.selection; exports org.elasticsearch.http; exports org.elasticsearch.index; diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 19bbaeef72279..8aac11e4326df 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -67,6 +67,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.PersistedClusterStateService; +import org.elasticsearch.health.node.LocalHealthMonitor; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor; import org.elasticsearch.http.HttpTransportSettings; @@ -524,7 +525,8 @@ public void apply(Settings value, Settings current, Settings previous) { CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING, 
MasterHistory.MAX_HISTORY_AGE_SETTING, ReadinessService.PORT, - HealthNode.isEnabled() ? HealthNodeTaskExecutor.ENABLED_SETTING : null + HealthNode.isEnabled() ? HealthNodeTaskExecutor.ENABLED_SETTING : null, + HealthNode.isEnabled() ? LocalHealthMonitor.POLL_INTERVAL_SETTING : null ).filter(Objects::nonNull).collect(Collectors.toSet()); static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index 4990bba4f5036..e9e5ecc178823 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -145,6 +145,25 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private ByteSizeValue getFreeBytes(ByteSizeValue total, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom) { + if (watermark.isAbsolute()) { + return watermark.getAbsolute(); + } + return ByteSizeValue.ofBytes(total.getBytes() - watermark.calculateValue(total, maxHeadroom).getBytes()); + } + + public ByteSizeValue getFreeBytesHighWatermark(ByteSizeValue total) { + return getFreeBytes(total, highWatermark, ByteSizeValue.MINUS_ONE); + } + + public ByteSizeValue getFreeBytesFloodStageWatermark(ByteSizeValue total) { + return getFreeBytes(total, floodStageWatermark, ByteSizeValue.MINUS_ONE); + } + + public ByteSizeValue getFreeBytesFrozenFloodStageWatermark(ByteSizeValue total) { + return getFreeBytes(total, frozenFloodStageWatermark, frozenFloodStageMaxHeadroom); + } + private String getThresholdStringRep(RelativeByteSizeValue relativeByteSizeValue) { if (relativeByteSizeValue.isAbsolute()) { return relativeByteSizeValue.getAbsolute().getStringRep(); @@ -186,11 +205,11 @@ public int hashCode() { ); } - static Builder newBuilder() { + public static Builder newBuilder() { return new Builder(); } - static Builder newBuilder(Disk disk) { + public static Builder newBuilder(Disk disk) { return new Builder(disk); } @@ -210,16 +229,16 @@ private Builder(Disk disk) { private Builder() {} - Builder highWatermark(RelativeByteSizeValue highWatermark) { + public Disk.Builder highWatermark(RelativeByteSizeValue highWatermark) { this.highWatermark = highWatermark; return this; } - Builder highWatermark(String highWatermark, String setting) { + public Disk.Builder highWatermark(String highWatermark, String setting) { return highWatermark(RelativeByteSizeValue.parseRelativeByteSizeValue(highWatermark, setting)); } - Builder floodStageWatermark(RelativeByteSizeValue floodStageWatermark) { + public Disk.Builder floodStageWatermark(RelativeByteSizeValue floodStageWatermark) { this.floodStageWatermark = floodStageWatermark; return this; } @@ -228,25 +247,25 @@ public Builder floodStageWatermark(String floodStageWatermark, String setting) { return floodStageWatermark(RelativeByteSizeValue.parseRelativeByteSizeValue(floodStageWatermark, setting)); } - Builder frozenFloodStageWatermark(RelativeByteSizeValue frozenFloodStageWatermark) { + public Disk.Builder frozenFloodStageWatermark(RelativeByteSizeValue frozenFloodStageWatermark) { this.frozenFloodStageWatermark = frozenFloodStageWatermark; return this; } - Builder frozenFloodStageWatermark(String frozenFloodStageWatermark, String setting) { + public Disk.Builder frozenFloodStageWatermark(String frozenFloodStageWatermark, String setting) { return 
frozenFloodStageWatermark(RelativeByteSizeValue.parseRelativeByteSizeValue(frozenFloodStageWatermark, setting)); } - Builder frozenFloodStageMaxHeadroom(ByteSizeValue frozenFloodStageMaxHeadroom) { + public Disk.Builder frozenFloodStageMaxHeadroom(ByteSizeValue frozenFloodStageMaxHeadroom) { this.frozenFloodStageMaxHeadroom = frozenFloodStageMaxHeadroom; return this; } - Builder frozenFloodStageMaxHeadroom(String frozenFloodStageMaxHeadroom, String setting) { + public Disk.Builder frozenFloodStageMaxHeadroom(String frozenFloodStageMaxHeadroom, String setting) { return frozenFloodStageMaxHeadroom(ByteSizeValue.parseBytesSizeValue(frozenFloodStageMaxHeadroom, setting)); } - Disk build() { + public Disk build() { return new Disk(highWatermark, floodStageWatermark, frozenFloodStageWatermark, frozenFloodStageMaxHeadroom); } } diff --git a/server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java b/server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java new file mode 100644 index 0000000000000..4638e842d0cd3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/health/node/DiskHealthInfo.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.health.node; + +import org.elasticsearch.health.HealthStatus; + +/** + * The health status of the disk space of this node along with the cause. + */ +record DiskHealthInfo(HealthStatus healthStatus, Cause cause) { + DiskHealthInfo(HealthStatus healthStatus) { + this(healthStatus, null); + } + + enum Cause { + NODE_OVER_HIGH_THRESHOLD, + NODE_OVER_THE_FLOOD_STAGE_THRESHOLD, + FROZEN_NODE_OVER_FLOOD_STAGE_THRESHOLD, + NODE_HAS_NO_DISK_STATS + } +} diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java new file mode 100644 index 0000000000000..8dc1afa6a99cc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.health.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.DiskUsage; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.health.HealthStatus; +import org.elasticsearch.health.metadata.HealthMetadata; +import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor; +import org.elasticsearch.node.NodeService; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This class monitors the health of the node regarding the load on several resources. + * Currently, it only checks for available disk space. Furthermore, it informs the health + * node about the local health upon change or when a new node is detected. + */ +public class LocalHealthMonitor implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(LocalHealthMonitor.class); + + public static final Setting POLL_INTERVAL_SETTING = Setting.timeSetting( + "health.reporting.local.monitor.interval", + TimeValue.timeValueSeconds(30), + TimeValue.timeValueSeconds(10), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final ClusterService clusterService; + private final ThreadPool threadPool; + private final DiskCheck diskCheck; + + private volatile TimeValue monitorInterval; + private volatile boolean enabled; + // Signals that all the prerequisites have been fulfilled and the monitoring task can be scheduled. + private volatile boolean prerequisitesFulfilled; + // Ensures that only one monitoring task will be in progress at any moment in time. + // It removes the need to synchronize scheduling since at the event that there are two + // monitoring tasks scheduled, one of them will be no-op. + private final AtomicBoolean inProgress = new AtomicBoolean(); + // Keeps the latest health state that was successfully reported. 
+ private volatile DiskHealthInfo lastReportedDiskHealthInfo = null; + + public LocalHealthMonitor(Settings settings, ClusterService clusterService, NodeService nodeService, ThreadPool threadPool) { + this.threadPool = threadPool; + this.monitorInterval = POLL_INTERVAL_SETTING.get(settings); + this.enabled = HealthNodeTaskExecutor.ENABLED_SETTING.get(settings); + this.clusterService = clusterService; + this.diskCheck = new DiskCheck(nodeService); + clusterService.addListener(this); + ClusterSettings clusterSettings = clusterService.getClusterSettings(); + clusterSettings.addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, this::setMonitorInterval); + clusterSettings.addSettingsUpdateConsumer(HealthNodeTaskExecutor.ENABLED_SETTING, this::setEnabled); + } + + void setMonitorInterval(TimeValue monitorInterval) { + this.monitorInterval = monitorInterval; + maybeScheduleNow(); + } + + void setEnabled(boolean enabled) { + this.enabled = enabled; + maybeScheduleNow(); + } + + /** + * We always check if the prerequisites are fulfilled and if the health node + * is enabled before we schedule a monitoring task. + */ + private void maybeScheduleNextRun(TimeValue time) { + if (prerequisitesFulfilled && enabled) { + threadPool.scheduleUnlessShuttingDown(time, ThreadPool.Names.MANAGEMENT, this::monitorHealth); + } + } + + // Helper method that starts the monitoring without a delay. + private void maybeScheduleNow() { + maybeScheduleNextRun(TimeValue.timeValueMillis(1)); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (prerequisitesFulfilled == false) { + prerequisitesFulfilled = event.state().nodesIfRecovered().getMinNodeVersion().onOrAfter(Version.V_8_5_0) + && HealthMetadata.getFromClusterState(event.state()) != null; + maybeScheduleNow(); + } + } + + // Visible for testing + void monitorHealth() { + if (inProgress.compareAndSet(false, true)) { + ClusterState clusterState = clusterService.state(); + HealthMetadata healthMetadata = HealthMetadata.getFromClusterState(clusterState); + assert healthMetadata != null : "health metadata should have been initialized."; + DiskHealthInfo previousHealth = this.lastReportedDiskHealthInfo; + DiskHealthInfo currentHealth = diskCheck.getHealth(healthMetadata, clusterState); + if (currentHealth.equals(previousHealth) == false) { + logger.debug("Health status changed from {} to {}", previousHealth, currentHealth); + this.lastReportedDiskHealthInfo = currentHealth; + } + inProgress.set(false); + // Scheduling happens after the flag inProgress is false, this ensures that + // if the feature is enabled after the following schedule statement, the setEnabled + // method will be able to schedule the next run, and it will not be a no-op. + // We prefer to err towards an extra scheduling than miss the enabling of this feature alltogether. + maybeScheduleNextRun(monitorInterval); + } + } + + DiskHealthInfo getLastReportedDiskHealthInfo() { + return lastReportedDiskHealthInfo; + } + + /** + * Determines the disk health of this node by checking if it exceeds the thresholds defined in the health metadata. 
+ */ + static class DiskCheck { + private final NodeService nodeService; + + DiskCheck(NodeService nodeService) { + this.nodeService = nodeService; + } + + DiskHealthInfo getHealth(HealthMetadata healthMetadata, ClusterState clusterState) { + DiscoveryNode node = clusterState.getNodes().getLocalNode(); + HealthMetadata.Disk diskMetadata = healthMetadata.getDiskMetadata(); + DiskUsage usage = getDiskUsage(); + if (usage == null) { + return new DiskHealthInfo(HealthStatus.UNKNOWN, DiskHealthInfo.Cause.NODE_HAS_NO_DISK_STATS); + } + + ByteSizeValue totalBytes = ByteSizeValue.ofBytes(usage.getTotalBytes()); + + if (node.isDedicatedFrozenNode()) { + long frozenFloodStageThreshold = diskMetadata.getFreeBytesFrozenFloodStageWatermark(totalBytes).getBytes(); + if (usage.getFreeBytes() < frozenFloodStageThreshold) { + logger.debug("flood stage disk watermark [{}] exceeded on {}", frozenFloodStageThreshold, usage); + return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.FROZEN_NODE_OVER_FLOOD_STAGE_THRESHOLD); + } + return new DiskHealthInfo(HealthStatus.GREEN); + } + + long floodStageThreshold = diskMetadata.getFreeBytesFloodStageWatermark(totalBytes).getBytes(); + if (usage.getFreeBytes() < floodStageThreshold) { + return new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.NODE_OVER_THE_FLOOD_STAGE_THRESHOLD); + } + + long highThreshold = diskMetadata.getFreeBytesHighWatermark(totalBytes).getBytes(); + if (usage.getFreeBytes() < highThreshold && hasRelocatingShards(clusterState, node.getId()) == false) { + return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD); + } + return new DiskHealthInfo(HealthStatus.GREEN); + } + + private DiskUsage getDiskUsage() { + NodeStats nodeStats = nodeService.stats( + CommonStatsFlags.NONE, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false + ); + return DiskUsage.findLeastAvailablePath(nodeStats); + } + + private boolean hasRelocatingShards(ClusterState clusterState, String nodeId) { + return clusterState.getRoutingNodes().node(nodeId).shardsWithState(ShardRoutingState.RELOCATING).isEmpty() == false; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 1fbd21a4aea66..7da6ce409debb 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -102,6 +102,7 @@ import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.health.HealthService; import org.elasticsearch.health.metadata.HealthMetadataService; +import org.elasticsearch.health.node.LocalHealthMonitor; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor; import org.elasticsearch.http.HttpServerTransport; @@ -945,6 +946,9 @@ protected Node( HealthMetadataService healthMetadataService = HealthNode.isEnabled() ? new HealthMetadataService(clusterService, settings) : null; + LocalHealthMonitor localHealthMonitor = HealthNode.isEnabled() + ? 
new LocalHealthMonitor(settings, clusterService, nodeService, threadPool) + : null; FileSettingsService fileSettingsService = new FileSettingsService( clusterService, @@ -1038,6 +1042,7 @@ protected Node( if (HealthNode.isEnabled()) { b.bind(HealthNodeTaskExecutor.class).toInstance(healthNodeTaskExecutor); b.bind(HealthMetadataService.class).toInstance(healthMetadataService); + b.bind(LocalHealthMonitor.class).toInstance(localHealthMonitor); } b.bind(Tracer.class).toInstance(tracer); b.bind(FileSettingsService.class).toInstance(fileSettingsService); diff --git a/server/src/test/java/org/elasticsearch/health/metadata/HealthMetadataTests.java b/server/src/test/java/org/elasticsearch/health/metadata/HealthMetadataTests.java new file mode 100644 index 0000000000000..2453c4efae221 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/health/metadata/HealthMetadataTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.health.metadata; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class HealthMetadataTests extends ESTestCase { + + public void testFreeBytesCalculationOfAbsoluteValue() { + HealthMetadata.Disk metadata = HealthMetadata.Disk.newBuilder() + .highWatermark("100B", "bytes-high") + .floodStageWatermark("50B", "bytes-flood") + .frozenFloodStageWatermark("50B", "bytes-frozen-flood") + .frozenFloodStageMaxHeadroom("20B", "headroom") + .build(); + assertThat(metadata.getFreeBytesHighWatermark(ByteSizeValue.MINUS_ONE), equalTo(ByteSizeValue.ofBytes(100))); + assertThat(metadata.getFreeBytesFloodStageWatermark(ByteSizeValue.MINUS_ONE), equalTo(ByteSizeValue.ofBytes(50))); + assertThat(metadata.getFreeBytesFrozenFloodStageWatermark(ByteSizeValue.MINUS_ONE), equalTo(ByteSizeValue.ofBytes(50))); + } + + public void testFreeBytesCalculationMaxHeadroom() { + HealthMetadata.Disk metadata = HealthMetadata.Disk.newBuilder() + .highWatermark("90%", "ratio-high") + .floodStageWatermark("95%", "ratio-flood") + .frozenFloodStageWatermark("95%", "ratio-frozen-flood") + .frozenFloodStageMaxHeadroom("20B", "headroom") + .build(); + // For now only the frozen tier is using the max headroom setting + assertThat(metadata.getFreeBytesFrozenFloodStageWatermark(ByteSizeValue.ofBytes(1000)), equalTo(ByteSizeValue.ofBytes(20))); + } + + public void testFreeBytesCalculationPercent() { + HealthMetadata.Disk metadata = HealthMetadata.Disk.newBuilder() + .highWatermark("90%", "ratio-high") + .floodStageWatermark("95%", "ratio-flood") + .frozenFloodStageWatermark("95%", "ratio-frozen-flood") + .frozenFloodStageMaxHeadroom("60B", "headroom") + .build(); + assertThat(metadata.getFreeBytesHighWatermark(ByteSizeValue.ofBytes(1000)), equalTo(ByteSizeValue.ofBytes(100))); + assertThat(metadata.getFreeBytesFloodStageWatermark(ByteSizeValue.ofBytes(1000)), equalTo(ByteSizeValue.ofBytes(50))); + assertThat(metadata.getFreeBytesFrozenFloodStageWatermark(ByteSizeValue.ofBytes(1000)), equalTo(ByteSizeValue.ofBytes(50))); + } +} diff --git a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java 
b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java new file mode 100644 index 0000000000000..24b1b58a75eb1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java @@ -0,0 +1,311 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.health.node; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.RelativeByteSizeValue; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.health.HealthStatus; +import org.elasticsearch.health.metadata.HealthMetadata; +import org.elasticsearch.health.node.selection.HealthNodeExecutorTests; +import org.elasticsearch.monitor.fs.FsInfo; +import org.elasticsearch.node.NodeService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LocalHealthMonitorTests extends ESTestCase { + + private static ThreadPool threadPool; + private NodeService nodeService; + private ClusterService clusterService; + private DiscoveryNode node; + private DiscoveryNode frozenNode; + private HealthMetadata healthMetadata; + private ClusterState clusterState; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(HealthNodeExecutorTests.class.getSimpleName()); + } + + @AfterClass + public static void tearDownThreadPool() { + terminate(threadPool); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + // Set-up cluster state + healthMetadata = new HealthMetadata( + HealthMetadata.Disk.newBuilder() + .highWatermark(new RelativeByteSizeValue(ByteSizeValue.ofBytes(100))) + .floodStageWatermark(new RelativeByteSizeValue(ByteSizeValue.ofBytes(50))) + .frozenFloodStageWatermark(new RelativeByteSizeValue(ByteSizeValue.ofBytes(50))) + .frozenFloodStageMaxHeadroom(ByteSizeValue.ofBytes(10)) + .build() + ); + node = new DiscoveryNode( + "node", + "node", + ESTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), + DiscoveryNodeRole.roles(), + Version.CURRENT + ); + frozenNode = new 
DiscoveryNode( + "frozen-node", + "frozen-node", + ESTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), + Set.of(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE), + Version.CURRENT + ); + clusterState = ClusterState.EMPTY_STATE.copyAndUpdate( + b -> b.nodes(DiscoveryNodes.builder().add(node).add(frozenNode).localNodeId(node.getId()).build()) + ).copyAndUpdate(b -> b.putCustom(HealthMetadata.TYPE, healthMetadata)); + + // Set-up cluster service + clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + when(clusterService.state()).thenReturn(clusterState); + + // Set-up node service with a node with a healthy disk space usage + nodeService = mock(NodeService.class); + } + + public void testUpdateNodeHealthStatus() { + simulateHealthDiskSpace(); + LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(Settings.EMPTY, clusterService, nodeService, threadPool); + assertThat(localHealthMonitor.getLastReportedDiskHealthInfo(), nullValue()); + localHealthMonitor.monitorHealth(); + assertThat(localHealthMonitor.getLastReportedDiskHealthInfo(), equalTo(new DiskHealthInfo(HealthStatus.GREEN, null))); + } + + public void testEnablingAndDisabling() throws Exception { + simulateHealthDiskSpace(); + DiskHealthInfo healthyNode = new DiskHealthInfo(HealthStatus.GREEN, null); + when(clusterService.state()).thenReturn(null); + LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(Settings.EMPTY, clusterService, nodeService, threadPool); + + // Ensure that there are no issues if the cluster state hasn't been initialized yet + localHealthMonitor.setEnabled(true); + assertThat(localHealthMonitor.getLastReportedDiskHealthInfo(), nullValue()); + + when(clusterService.state()).thenReturn(clusterState); + localHealthMonitor.clusterChanged(new ClusterChangedEvent("test", clusterState, ClusterState.EMPTY_STATE)); + assertBusy(() -> assertThat(localHealthMonitor.getLastReportedDiskHealthInfo(), equalTo(healthyNode))); + + // Disable the local monitoring + localHealthMonitor.setEnabled(false); + localHealthMonitor.setMonitorInterval(TimeValue.timeValueMillis(1)); + simulateDiskOutOfSpace(); + assertRemainsUnchanged(localHealthMonitor::getLastReportedDiskHealthInfo, healthyNode); + + localHealthMonitor.setEnabled(true); + DiskHealthInfo nextHealthStatus = new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.NODE_OVER_THE_FLOOD_STAGE_THRESHOLD); + assertBusy(() -> assertThat(localHealthMonitor.getLastReportedDiskHealthInfo(), equalTo(nextHealthStatus))); + } + + private void assertRemainsUnchanged(Supplier supplier, DiskHealthInfo expected) { + expectThrows(AssertionError.class, () -> assertBusy(() -> assertThat(supplier.get(), not(expected)), 1, TimeUnit.SECONDS)); + } + + public void testNoDiskData() { + when( + nodeService.stats( + eq(CommonStatsFlags.NONE), + eq(false), + eq(false), + eq(false), + eq(false), + eq(true), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false) + ) + ).thenReturn(nodeStats()); + LocalHealthMonitor.DiskCheck diskCheck = new LocalHealthMonitor.DiskCheck(nodeService); + DiskHealthInfo diskHealth = diskCheck.getHealth(healthMetadata, clusterState); + assertThat(diskHealth, equalTo(new DiskHealthInfo(HealthStatus.UNKNOWN, DiskHealthInfo.Cause.NODE_HAS_NO_DISK_STATS))); + } + + public void testGreenDiskStatus() { + simulateHealthDiskSpace(); + LocalHealthMonitor.DiskCheck 
diskMonitor = new LocalHealthMonitor.DiskCheck(nodeService); + DiskHealthInfo diskHealth = diskMonitor.getHealth(healthMetadata, clusterState); + assertThat(diskHealth, equalTo(new DiskHealthInfo(HealthStatus.GREEN, null))); + } + + public void testYellowDiskStatus() { + initializeIncreasedDiskSpaceUsage(); + LocalHealthMonitor.DiskCheck diskMonitor = new LocalHealthMonitor.DiskCheck(nodeService); + DiskHealthInfo diskHealth = diskMonitor.getHealth(healthMetadata, clusterState); + assertThat(diskHealth, equalTo(new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD))); + } + + public void testRedDiskStatus() { + simulateDiskOutOfSpace(); + LocalHealthMonitor.DiskCheck diskMonitor = new LocalHealthMonitor.DiskCheck(nodeService); + DiskHealthInfo diskHealth = diskMonitor.getHealth(healthMetadata, clusterState); + assertThat(diskHealth, equalTo(new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.NODE_OVER_THE_FLOOD_STAGE_THRESHOLD))); + } + + public void testFrozenGreenDiskStatus() { + simulateHealthDiskSpace(); + ClusterState clusterStateFrozenLocalNode = clusterState.copyAndUpdate( + b -> b.nodes(DiscoveryNodes.builder().add(node).add(frozenNode).localNodeId(frozenNode.getId()).build()) + ); + LocalHealthMonitor.DiskCheck diskMonitor = new LocalHealthMonitor.DiskCheck(nodeService); + DiskHealthInfo diskHealth = diskMonitor.getHealth(healthMetadata, clusterStateFrozenLocalNode); + assertThat(diskHealth, equalTo(new DiskHealthInfo(HealthStatus.GREEN, null))); + } + + public void testFrozenRedDiskStatus() { + simulateDiskOutOfSpace(); + ClusterState clusterStateFrozenLocalNode = clusterState.copyAndUpdate( + b -> b.nodes(DiscoveryNodes.builder().add(node).add(frozenNode).localNodeId(frozenNode.getId()).build()) + ); + LocalHealthMonitor.DiskCheck diskMonitor = new LocalHealthMonitor.DiskCheck(nodeService); + DiskHealthInfo diskHealth = diskMonitor.getHealth(healthMetadata, clusterStateFrozenLocalNode); + assertThat(diskHealth, equalTo(new DiskHealthInfo(HealthStatus.RED, DiskHealthInfo.Cause.FROZEN_NODE_OVER_FLOOD_STAGE_THRESHOLD))); + } + + private void simulateDiskOutOfSpace() { + when( + nodeService.stats( + eq(CommonStatsFlags.NONE), + eq(false), + eq(false), + eq(false), + eq(false), + eq(true), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false) + ) + ).thenReturn(nodeStats(1000, 10)); + } + + private void initializeIncreasedDiskSpaceUsage() { + when( + nodeService.stats( + eq(CommonStatsFlags.NONE), + eq(false), + eq(false), + eq(false), + eq(false), + eq(true), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false) + ) + ).thenReturn(nodeStats(1000, 80)); + } + + private void simulateHealthDiskSpace() { + when( + nodeService.stats( + eq(CommonStatsFlags.NONE), + eq(false), + eq(false), + eq(false), + eq(false), + eq(true), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false) + ) + ).thenReturn(nodeStats(1000, 110)); + } + + private NodeStats nodeStats(long total, long available) { + final FsInfo fs = new FsInfo(-1, null, new FsInfo.Path[] { new FsInfo.Path(null, null, total, 10, available) }); + return nodeStats(fs); + } + + private NodeStats nodeStats() { + return nodeStats(null); + } + + private NodeStats nodeStats(FsInfo fs) { + return new NodeStats( + node, // ignored + randomMillisUpToYear9999(), + null, + null, + null, + null, + null, + fs, + null, + null, + 
null, + null, + null, + null, + null, + null, + null + ); + } +} From aae0ed8eb1620ae26764c524170fe416a0e6b6b4 Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Wed, 3 Aug 2022 10:36:03 +0200 Subject: [PATCH 070/265] [DOCS] Added note about using _size in Kibana. Closes #88322 (#89030) * [DOCS] Added note about using _size in Kibana. Closes #88322 * Use correct attributes --- docs/plugins/mapper-size.asciidoc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/plugins/mapper-size.asciidoc b/docs/plugins/mapper-size.asciidoc index 50b2586f6f000..1929daa89bf92 100644 --- a/docs/plugins/mapper-size.asciidoc +++ b/docs/plugins/mapper-size.asciidoc @@ -83,3 +83,12 @@ GET my-index-000001/_search {ref}/search-fields.html#script-fields[script field] to return the `_size` field in the search response. +[NOTE] +.Using `_size` in {kib} +================================================ + +To use the `_size` field in {kib}, update the `metaFields` setting and add +`_size` to the list of meta fields. `metaFields` can be configured in {kib} +from the Advanced Settings page in Management. + +================================================ \ No newline at end of file From b761ba77e1f6a6e0b650b30ad0640c5141ae248e Mon Sep 17 00:00:00 2001 From: bellengao Date: Wed, 3 Aug 2022 17:09:43 +0800 Subject: [PATCH 071/265] Rename ignoredIndexSettings to ignoreIndexSettings in MountSearchableSnapshotRequest (#79604) Relates to #75982, #88987 and #89061 Co-authored-by: Tanguy Leroux Co-authored-by: gaobinlong --- .../MountSearchableSnapshotRequest.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java index 02657c384859e..7d57a5634fb4e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java @@ -91,7 +91,7 @@ public class MountSearchableSnapshotRequest extends MasterNodeRequest Date: Wed, 3 Aug 2022 10:23:57 +0100 Subject: [PATCH 072/265] Fix typo (#89063) --- docs/reference/troubleshooting/corruption-issues.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/troubleshooting/corruption-issues.asciidoc b/docs/reference/troubleshooting/corruption-issues.asciidoc index 8ded9b923de73..914045a69a8ea 100644 --- a/docs/reference/troubleshooting/corruption-issues.asciidoc +++ b/docs/reference/troubleshooting/corruption-issues.asciidoc @@ -22,7 +22,7 @@ filesystem cache, so systems typically don't verify the checksum on a file very often. This is why you tend only to encounter a corruption exception when something unusual is happening. For instance, corruptions are often detected during merges, shard movements, and snapshots. This does not mean that these -proceses are causing corruption: they are examples of the rare times where +processes are causing corruption: they are examples of the rare times where reading a whole file is necessary. {es} takes the opportunity to verify the checksum at the same time, and this is when the corruption is detected and reported. It doesn't indicate the cause of the corruption or when it happened. 
From 735f7d1f48666f75ed305db5322f8c52d9ad17ae Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 3 Aug 2022 12:48:19 +0300 Subject: [PATCH 073/265] [ML] Previously assigned models should get at least one allocation (#88855) When ML nodes are replaced for some reason (cluster resize, upgrade, etc.), it is possible that some models cannot be allocated at all. Then, while the cluster is temporarily undersized, all cores are given to allocations of the models that have survived. If those ML nodes return later, there may be model deployments that were previously allocated that now do not get any allocations. The reason is that our planner will try to preserve all current allocations. Operationally, this is not what serves our users best. Instead, as we are already in a cluster that does not have enough resources to fully allocate all model deployments, we should try to give at least one allocation to each model that has previously been allocated. In order to know whether a model has previously been allocated, this commit adds a field to `TrainedModelAssignment` called `max_assigned_allocations`, which records the max number of allocations a deployment has received over its lifetime. We can then use this to establish whether a deployment has ever been allocated. Finally, we modify the `AssignmentPlanner` so that after computing a plan we check whether the plan gives at least one allocation to all previously allocated models. If not, we then compute a plan that tries to give at least one allocation to each previously allocated model. We can solve this using bin-packing alone. With that plan in hand, we can invoke the planner one more time to optimize the rest of the allocations whilst preserving the single allocations for previously allocated models. --- .../assignment/TrainedModelAssignment.java | 64 +++++- .../TrainedModelAssignmentTests.java | 15 ++ .../TrainedModelAssignmentRebalancer.java | 7 +- .../planning/AbstractPreserveAllocations.java | 3 +- .../assignment/planning/AssignmentPlan.java | 43 +++- .../planning/AssignmentPlanner.java | 121 +++++++++-- .../planning/LinearProgrammingPlanSolver.java | 8 +- ...TrainedModelAssignmentRebalancerTests.java | 8 +- .../planning/AssignmentPlanTests.java | 100 ++++++--- .../planning/AssignmentPlannerTests.java | 194 +++++++++++++++--- .../planning/PreserveAllAllocationsTests.java | 10 +- .../planning/PreserveOneAllocationTests.java | 10 +- 12 files changed, 469 insertions(+), 114 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index e4fc15e669f80..f559e39546626 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.StreamInput; @@ -48,6 +49,7 @@ public class TrainedModelAssignment implements SimpleDiffable PARSER = new ConstructingObjectParser<>( @@ -59,7 +61,8 @@ public class TrainedModelAssignment implements SimpleDiffable nodeRoutingTable, AssignmentState assignmentState,
String reason, - Instant startTime + Instant startTime, + Integer maxAssignedAllocations ) { this.taskParams = ExceptionsHelper.requireNonNull(taskParams, TASK_PARAMETERS); this.nodeRoutingTable = ExceptionsHelper.requireNonNull(nodeRoutingTable, ROUTING_TABLE); this.assignmentState = ExceptionsHelper.requireNonNull(assignmentState, ASSIGNMENT_STATE); this.reason = reason; this.startTime = ExceptionsHelper.requireNonNull(startTime, START_TIME); + this.maxAssignedAllocations = maxAssignedAllocations == null + ? totalCurrentAllocations() + : Math.max(maxAssignedAllocations, totalCurrentAllocations()); } public TrainedModelAssignment(StreamInput in) throws IOException { @@ -125,6 +142,11 @@ public TrainedModelAssignment(StreamInput in) throws IOException { this.assignmentState = in.readEnum(AssignmentState.class); this.reason = in.readOptionalString(); this.startTime = in.readInstant(); + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + this.maxAssignedAllocations = in.readVInt(); + } else { + this.maxAssignedAllocations = totalCurrentAllocations(); + } } public boolean isRoutedToNode(String nodeId) { @@ -189,6 +211,10 @@ public Instant getStartTime() { return startTime; } + public int getMaxAssignedAllocations() { + return maxAssignedAllocations; + } + public boolean isSatisfied(Set assignableNodeIds) { int allocations = nodeRoutingTable.entrySet() .stream() @@ -203,6 +229,10 @@ public boolean hasOutdatedRoutingEntries() { return nodeRoutingTable.values().stream().anyMatch(RoutingInfo::isOutdated); } + public int totalCurrentAllocations() { + return nodeRoutingTable.values().stream().mapToInt(RoutingInfo::getCurrentAllocations).sum(); + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -212,12 +242,13 @@ public boolean equals(Object o) { && Objects.equals(taskParams, that.taskParams) && Objects.equals(reason, that.reason) && Objects.equals(assignmentState, that.assignmentState) - && Objects.equals(startTime, that.startTime); + && Objects.equals(startTime, that.startTime) + && maxAssignedAllocations == that.maxAssignedAllocations; } @Override public int hashCode() { - return Objects.hash(nodeRoutingTable, taskParams, assignmentState, reason, startTime); + return Objects.hash(nodeRoutingTable, taskParams, assignmentState, reason, startTime, maxAssignedAllocations); } @Override @@ -230,6 +261,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(REASON.getPreferredName(), reason); } builder.timeField(START_TIME.getPreferredName(), startTime); + builder.field(MAX_ASSIGNED_ALLOCATIONS.getPreferredName(), maxAssignedAllocations); builder.endObject(); return builder; } @@ -241,6 +273,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(assignmentState); out.writeOptionalString(reason); out.writeInstant(startTime); + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeVInt(maxAssignedAllocations); + } } public Optional calculateAllocationStatus() { @@ -261,6 +296,7 @@ public static class Builder { private AssignmentState assignmentState; private String reason; private Instant startTime; + private int maxAssignedAllocations; public static Builder fromAssignment(TrainedModelAssignment assignment) { return new Builder( @@ -268,7 +304,8 @@ public static Builder fromAssignment(TrainedModelAssignment assignment) { assignment.nodeRoutingTable, assignment.assignmentState, assignment.reason, - assignment.startTime + assignment.startTime, + assignment.maxAssignedAllocations ); } @@ -281,17 +318,19 
@@ private Builder( Map nodeRoutingTable, AssignmentState assignmentState, String reason, - Instant startTime + Instant startTime, + int maxAssignedAllocations ) { this.taskParams = taskParams; this.nodeRoutingTable = new LinkedHashMap<>(nodeRoutingTable); this.assignmentState = assignmentState; this.reason = reason; this.startTime = startTime; + this.maxAssignedAllocations = maxAssignedAllocations; } private Builder(StartTrainedModelDeploymentAction.TaskParams taskParams) { - this(taskParams, new LinkedHashMap<>(), AssignmentState.STARTING, null, Instant.now()); + this(taskParams, new LinkedHashMap<>(), AssignmentState.STARTING, null, Instant.now(), 0); } public Builder setStartTime(Instant startTime) { @@ -299,6 +338,11 @@ public Builder setStartTime(Instant startTime) { return this; } + public Builder setMaxAssignedAllocations(int maxAssignedAllocations) { + this.maxAssignedAllocations = maxAssignedAllocations; + return this; + } + public Builder addRoutingEntry(String nodeId, RoutingInfo routingInfo) { if (nodeRoutingTable.containsKey(nodeId)) { throw new ResourceAlreadyExistsException( @@ -383,7 +427,7 @@ public Builder clearReason() { } public TrainedModelAssignment build() { - return new TrainedModelAssignment(taskParams, nodeRoutingTable, assignmentState, reason, startTime); + return new TrainedModelAssignment(taskParams, nodeRoutingTable, assignmentState, reason, startTime, maxAssignedAllocations); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java index 323fb60314dc6..812614f640fdb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java @@ -257,6 +257,21 @@ public void testIsSatisfied_GivenNotEnoughAllocations() { assertThat(assignment.isSatisfied(Sets.newHashSet("node-1", "node-2", "node-3")), is(false)); } + public void testMaxAssignedAllocations() { + TrainedModelAssignment assignment = TrainedModelAssignment.Builder.empty(randomTaskParams(10)) + .addRoutingEntry("node-1", new RoutingInfo(1, 2, RoutingState.STARTED, "")) + .addRoutingEntry("node-2", new RoutingInfo(2, 1, RoutingState.STARTED, "")) + .addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTING, "")) + .build(); + assertThat(assignment.getMaxAssignedAllocations(), equalTo(6)); + + TrainedModelAssignment assignmentAfterRemovingNode = TrainedModelAssignment.Builder.fromAssignment(assignment) + .removeRoutingEntry("node-1") + .build(); + assertThat(assignmentAfterRemovingNode.getMaxAssignedAllocations(), equalTo(6)); + assertThat(assignmentAfterRemovingNode.totalCurrentAllocations(), equalTo(5)); + } + private void assertValueWithinPercentageOfExpectedRatio(long value, long totalCount, double ratio, double tolerance) { double expected = totalCount * ratio; double lowerBound = (1.0 - tolerance) * expected; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index d25623550b2e6..209a3a1fc73ab 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -111,7 +111,8 @@ AssignmentPlan computeAssignmentPlan() { assignment.getTaskParams().estimateMemoryUsageBytes(), assignment.getTaskParams().getNumberOfAllocations(), assignment.getTaskParams().getThreadsPerAllocation(), - currentAssignments + currentAssignments, + assignment.getMaxAssignedAllocations() ); }).forEach(planModels::add); modelToAdd.ifPresent( @@ -121,7 +122,8 @@ AssignmentPlan computeAssignmentPlan() { taskParams.estimateMemoryUsageBytes(), taskParams.getNumberOfAllocations(), taskParams.getThreadsPerAllocation(), - Map.of() + Map.of(), + 0 ) ) ); @@ -157,6 +159,7 @@ private TrainedModelAssignmentMetadata.Builder buildAssignmentsFromPlan(Assignme ); if (existingAssignment != null) { assignmentBuilder.setStartTime(existingAssignment.getStartTime()); + assignmentBuilder.setMaxAssignedAllocations(existingAssignment.getMaxAssignedAllocations()); } Map assignments = assignmentPlan.assignments(model).orElseGet(Map::of); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java index 6aa71bafb4662..4aded2f295743 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java @@ -57,7 +57,8 @@ Model modifyModelPreservingPreviousAssignments(Model m) { m.memoryBytes(), m.allocations() - calculatePreservedAllocations(m), m.threadsPerAllocation(), - calculateAllocationsPerNodeToPreserve(m) + calculateAllocationsPerNodeToPreserve(m), + m.maxAssignedAllocations() ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java index 24994b031d9ba..8dd1abc48309e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java @@ -32,13 +32,18 @@ public record Model( long memoryBytes, int allocations, int threadsPerAllocation, - Map currentAllocationsByNodeId + Map currentAllocationsByNodeId, + int maxAssignedAllocations ) { - int getPreviouslyAssignedAllocations() { + int getCurrentAssignedAllocations() { return currentAllocationsByNodeId.values().stream().mapToInt(Integer::intValue).sum(); } + boolean hasEverBeenAllocated() { + return maxAssignedAllocations > 0; + } + @Override public String toString() { return id @@ -50,6 +55,8 @@ public String toString() { + threadsPerAllocation + ") (current_allocations = " + currentAllocationsByNodeId + + ") (max_assigned_allocations = " + + maxAssignedAllocations + ")"; } }; @@ -108,17 +115,17 @@ public int compareTo(AssignmentPlan o) { return Comparator.comparing(AssignmentPlan::computeQuality).compare(this, o); } - public boolean satisfiesPreviousAssignments() { - return models().stream().allMatch(this::isSatisfyingPreviousAssignmentsForModel); + public boolean 
satisfiesCurrentAssignments() { + return models().stream().allMatch(this::isSatisfyingCurrentAssignmentsForModel); } - private boolean isSatisfyingPreviousAssignmentsForModel(Model m) { + private boolean isSatisfyingCurrentAssignmentsForModel(Model m) { if (m.currentAllocationsByNodeId().isEmpty()) { return true; } Map nodeAssignments = assignments.get(m); int currentAllocations = nodeAssignments.values().stream().mapToInt(Integer::intValue).sum(); - return currentAllocations >= m.getPreviouslyAssignedAllocations(); + return currentAllocations >= m.getCurrentAssignedAllocations(); } public boolean satisfiesAllocations(Model m) { @@ -129,6 +136,21 @@ public boolean satisfiesAllModels() { return models().stream().allMatch(this::satisfiesAllocations); } + public boolean arePreviouslyAssignedModelsAssigned() { + return models().stream() + .filter(Model::hasEverBeenAllocated) + .map(this::totalAllocations) + .allMatch(totalAllocations -> totalAllocations > 0); + } + + public long countPreviouslyAssignedModelsThatAreStillAssigned() { + return models().stream() + .filter(Model::hasEverBeenAllocated) + .map(this::totalAllocations) + .filter(totalAllocations -> totalAllocations > 0) + .count(); + } + public int getRemainingNodeCores(String nodeId) { return remainingNodeCores.getOrDefault(nodeId, 0); } @@ -137,6 +159,13 @@ public long getRemainingNodeMemory(String nodeId) { return remainingNodeMemory.getOrDefault(nodeId, 0L); } + public int totalAllocations(Model m) { + if (assignments.containsKey(m) == false) { + return 0; + } + return assignments.get(m).values().stream().mapToInt(Integer::intValue).sum(); + } + private Quality computeQuality() { boolean isSatisfyingPreviousAssignments = true; double weighedAllocationsScore = 0; @@ -144,7 +173,7 @@ private Quality computeQuality() { for (Map.Entry> entry : assignments.entrySet()) { Model m = entry.getKey(); - isSatisfyingPreviousAssignments = isSatisfyingPreviousAssignments && isSatisfyingPreviousAssignmentsForModel(m); + isSatisfyingPreviousAssignments = isSatisfyingPreviousAssignments && isSatisfyingCurrentAssignmentsForModel(m); Map modelAssignments = entry.getValue(); if (modelAssignments != null) { for (Map.Entry nodeAllocations : modelAssignments.entrySet()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java index dd50ebdbd0f23..4c0812932dd09 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java @@ -14,7 +14,11 @@ import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; import java.util.Comparator; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; import static org.elasticsearch.core.Strings.format; @@ -34,6 +38,9 @@ * Furthermore, the planner preserves at least one allocation for all existing * assignments. This way, the new plan will only have new assignments and the * transition can happen with minimal impact on performance of started deployments. + * However, if previously assigned models do not receive any allocation, then we + * attempt to find a solution that provides at least one allocation to + * previously assigned models. 
*/ public class AssignmentPlanner { @@ -48,44 +55,122 @@ public AssignmentPlanner(List nodes, List models) { } public AssignmentPlan computePlan() { + return computePlan(true); + } + + private AssignmentPlan computePlan(boolean tryAssigningPreviouslyAssignedModels) { logger.debug(() -> format("Computing plan for nodes = %s; models = %s", nodes, models)); AssignmentPlan bestPlan; - // First solve preserving one allocation per assignment because that is most flexible - AssignmentPlan planKeepingOneAllocationOnPreviousAssignments = solveKeepingOneAllocationOnPreviousAssignments(); - if (planKeepingOneAllocationOnPreviousAssignments.satisfiesPreviousAssignments() == false) { - bestPlan = solvePreservingAllPreviousAssignments(); - } else if (planKeepingOneAllocationOnPreviousAssignments.satisfiesAllModels() == false) { - AssignmentPlan planKeepingAllAllocationsOnPreviousAssignments = solvePreservingAllPreviousAssignments(); - bestPlan = planKeepingAllAllocationsOnPreviousAssignments.compareTo(planKeepingOneAllocationOnPreviousAssignments) >= 0 - ? planKeepingAllAllocationsOnPreviousAssignments - : planKeepingOneAllocationOnPreviousAssignments; + AssignmentPlan planSatisfyingCurrentAssignments = solveSatisfyingCurrentAssignments(); + logger.debug(() -> "Plan satisfying current assignments =\n" + planSatisfyingCurrentAssignments.prettyPrint()); + if (planSatisfyingCurrentAssignments.arePreviouslyAssignedModelsAssigned() == false && tryAssigningPreviouslyAssignedModels) { + AssignmentPlan planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated = + solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated(); + logger.debug( + () -> "Plan with at least one allocation for previously assigned models =\n" + + planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated.prettyPrint() + ); + if (planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated.arePreviouslyAssignedModelsAssigned()) { + bestPlan = planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated; + } else { + bestPlan = planSatisfyingCurrentAssignments + .countPreviouslyAssignedModelsThatAreStillAssigned() >= planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated + .countPreviouslyAssignedModelsThatAreStillAssigned() + ? planSatisfyingCurrentAssignments + : planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated; + } } else { - bestPlan = planKeepingOneAllocationOnPreviousAssignments; + bestPlan = planSatisfyingCurrentAssignments; } + logger.debug(() -> "Best plan =\n" + bestPlan.prettyPrint()); logger.debug(() -> prettyPrintOverallStats(bestPlan)); return bestPlan; } - private AssignmentPlan solveKeepingOneAllocationOnPreviousAssignments() { + private AssignmentPlan solveSatisfyingCurrentAssignments() { + AssignmentPlan bestPlan; + // First solve preserving one allocation per assignment because that is most flexible + AssignmentPlan planKeepingOneAllocationOnCurrentAssignments = solveKeepingOneAllocationOnCurrentAssignments(); + if (planKeepingOneAllocationOnCurrentAssignments.satisfiesCurrentAssignments() == false) { + bestPlan = solvePreservingAllAllocationsOnCurrentAssignments(); + } else if (planKeepingOneAllocationOnCurrentAssignments.satisfiesAllModels() == false) { + AssignmentPlan planKeepingAllAllocationsOnCurrentAssignments = solvePreservingAllAllocationsOnCurrentAssignments(); + bestPlan = planKeepingAllAllocationsOnCurrentAssignments.compareTo(planKeepingOneAllocationOnCurrentAssignments) >= 0 + ? 
planKeepingAllAllocationsOnCurrentAssignments + : planKeepingOneAllocationOnCurrentAssignments; + } else { + bestPlan = planKeepingOneAllocationOnCurrentAssignments; + } + return bestPlan; + } + + private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated() { + logger.debug(() -> "Attempting to solve assigning at least one allocations to previously assigned models"); + List previouslyAssignedModelsOnly = models.stream() + .filter(m -> m.hasEverBeenAllocated()) + .map( + m -> new Model( + m.id(), + m.memoryBytes(), + 1, + m.threadsPerAllocation(), + m.currentAllocationsByNodeId(), + m.maxAssignedAllocations() + ) + ) + .toList(); + AssignmentPlan planWithSingleAllocationForPreviouslyAssignedModels = new LinearProgrammingPlanSolver( + nodes, + previouslyAssignedModelsOnly + ).solvePlan(true); + + Map modelIdToNodeIdWithSingleAllocation = new HashMap<>(); + for (Model m : planWithSingleAllocationForPreviouslyAssignedModels.models()) { + Optional> assignments = planWithSingleAllocationForPreviouslyAssignedModels.assignments(m); + Set nodes = assignments.orElse(Map.of()).keySet(); + if (nodes.isEmpty() == false) { + assert nodes.size() == 1; + modelIdToNodeIdWithSingleAllocation.put(m.id(), nodes.iterator().next().id()); + } + } + + List planModels = models.stream().map(m -> { + Map currentAllocationsByNodeId = modelIdToNodeIdWithSingleAllocation.containsKey(m.id()) + ? Map.of(modelIdToNodeIdWithSingleAllocation.get(m.id()), 1) + : Map.of(); + return new Model( + m.id(), + m.memoryBytes(), + m.allocations(), + m.threadsPerAllocation(), + currentAllocationsByNodeId, + m.maxAssignedAllocations() + ); + }).toList(); + + return new AssignmentPlanner(nodes, planModels).computePlan(false); + } + + private AssignmentPlan solveKeepingOneAllocationOnCurrentAssignments() { // We do not want to ever completely unassign a model from a node so we // can move allocations without having temporary impact on performance. 
- logger.trace(() -> format("Solving preserving one allocation on previous assignments")); - return solvePreservingPreviousAssignments(new PreserveOneAllocation(nodes, models)); + logger.trace(() -> format("Solving preserving one allocation on current assignments")); + return solvePreservingCurrentAssignments(new PreserveOneAllocation(nodes, models)); } - private AssignmentPlan solvePreservingAllPreviousAssignments() { - logger.trace(() -> format("Solving preserving all allocations on previous assignments")); - return solvePreservingPreviousAssignments(new PreserveAllAllocations(nodes, models)); + private AssignmentPlan solvePreservingAllAllocationsOnCurrentAssignments() { + logger.trace(() -> format("Solving preserving all allocations on current assignments")); + return solvePreservingCurrentAssignments(new PreserveAllAllocations(nodes, models)); } - private AssignmentPlan solvePreservingPreviousAssignments(AbstractPreserveAllocations preserveAllocations) { + private AssignmentPlan solvePreservingCurrentAssignments(AbstractPreserveAllocations preserveAllocations) { List planNodes = preserveAllocations.nodesPreservingAllocations(); List planModels = preserveAllocations.modelsPreservingAllocations(); logger.trace(() -> format("Nodes after applying allocation preserving strategy = %s", planNodes)); logger.trace(() -> format("Models after applying allocation preserving strategy = %s", planModels)); - AssignmentPlan assignmentPlan = new LinearProgrammingPlanSolver(planNodes, planModels).solvePlan(); + AssignmentPlan assignmentPlan = new LinearProgrammingPlanSolver(planNodes, planModels).solvePlan(false); return preserveAllocations.mergePreservedAllocations(assignmentPlan); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java index e798eb0b1db9c..61268946335e4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java @@ -92,13 +92,17 @@ class LinearProgrammingPlanSolver { .collect(Collectors.toMap(Function.identity(), m -> m.memoryBytes() / (double) maxModelMemoryBytes)); } - AssignmentPlan solvePlan() { + AssignmentPlan solvePlan(boolean useBinPackingOnly) { if (models.isEmpty() || maxNodeCores == 0) { return AssignmentPlan.builder(nodes, models).build(); } Tuple, Double>, AssignmentPlan> weightsAndBinPackingPlan = calculateWeightsAndBinPackingPlan(); + if (useBinPackingOnly) { + return weightsAndBinPackingPlan.v2(); + } + Map, Double> allocationValues = new HashMap<>(); Map, Double> assignmentValues = new HashMap<>(); if (solveLinearProgram(weightsAndBinPackingPlan.v1(), allocationValues, assignmentValues) == false) { @@ -275,7 +279,7 @@ private boolean solveLinearProgram( // Each model should not get more allocations than is required. // Also, if the model has previous assignments, it should get at least as many allocations as it did before. 
model.addExpression("allocations_of_model_" + m.id() + "_not_more_than_required") - .lower(m.getPreviouslyAssignedAllocations()) + .lower(m.getCurrentAssignedAllocations()) .upper(m.allocations()) .setLinearFactorsSimple(varsForModel(m, allocationVars)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java index 942c4624b25c4..0df7073f2a8ff 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java @@ -418,7 +418,7 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(1))); assertThat(assignment.getNodeRoutingTable(), hasKey("node-1")); assertThat(assignment.getNodeRoutingTable().get("node-1").getCurrentAllocations(), equalTo(2)); - assertThat(assignment.getNodeRoutingTable().get("node-1").getTargetAllocations(), equalTo(2)); + assertThat(assignment.getNodeRoutingTable().get("node-1").getTargetAllocations(), equalTo(1)); assertThat(assignment.getNodeRoutingTable().get("node-1").getState(), equalTo(RoutingState.STARTED)); assertThat(assignment.getReason().isPresent(), is(true)); assertThat( @@ -433,7 +433,11 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo TrainedModelAssignment assignment = result.getModelAssignment(previousModel2Id); assertThat(assignment, is(notNullValue())); assertThat(assignment.getAssignmentState(), equalTo(AssignmentState.STARTING)); - assertThat(assignment.getNodeRoutingTable(), is(anEmptyMap())); + assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(1))); + assertThat(assignment.getNodeRoutingTable(), hasKey("node-1")); + assertThat(assignment.getNodeRoutingTable().get("node-1").getCurrentAllocations(), equalTo(2)); + assertThat(assignment.getNodeRoutingTable().get("node-1").getTargetAllocations(), equalTo(2)); + assertThat(assignment.getNodeRoutingTable().get("node-1").getState(), equalTo(RoutingState.STARTING)); assertThat(assignment.getReason().isPresent(), is(true)); assertThat( assignment.getReason().get(), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java index 9648339139801..823fa139c52da 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java @@ -24,21 +24,21 @@ public class AssignmentPlanTests extends ESTestCase { public void testBuilderCtor_GivenDuplicateNode() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 1, 2, Map.of()); + Model m = new Model("m_1", 40, 1, 2, Map.of(), 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n, n), List.of(m))); } public void testBuilderCtor_GivenDuplicateModel() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 1, 2, Map.of()); + Model m = new Model("m_1", 40, 1, 2, Map.of(), 0); expectThrows(IllegalArgumentException.class, () -> 
AssignmentPlan.builder(List.of(n), List.of(m, m))); } public void testAssignModelToNode_GivenNoPreviousAssignment() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 1, 2, Map.of()); + Model m = new Model("m_1", 40, 1, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -57,13 +57,13 @@ public void testAssignModelToNode_GivenNoPreviousAssignment() { AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); } - public void testAssignModelToNode_GivenNewPlanSatisfiesPreviousAssignment() { + public void testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -77,13 +77,13 @@ public void testAssignModelToNode_GivenNewPlanSatisfiesPreviousAssignment() { AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); } - public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyPreviousAssignment() { + public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 2)); + Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 2), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -97,13 +97,13 @@ public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyPreviousAssignment() AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(false)); + assertThat(plan.satisfiesCurrentAssignments(), is(false)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); } public void testAssignModelToNode_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 101, 2, 2, Map.of()); + Model m = new Model("m_1", 101, 2, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 1)); @@ -113,20 +113,20 @@ public void testAssignModelToNode_GivenPreviouslyUnassignedModelDoesNotFit() { public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 101, 2, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 101, 2, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); } public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocation() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 100, 5, 1, Map.of()); + Model m = new Model("m_1", 100, 5, 1, Map.of(), 0); 
AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 5)); @@ -139,7 +139,7 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocati public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAllocation() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 100, 3, 2, Map.of()); + Model m = new Model("m_1", 100, 3, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 3)); @@ -152,7 +152,7 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAlloc public void testAssignModelToNode_GivenSameModelAssignedTwice() { Node n = new Node("n_1", 100, 8); - Model m = new Model("m_1", 60, 4, 2, Map.of()); + Model m = new Model("m_1", 60, 4, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -180,13 +180,13 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 3))); } public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 101, 1, 1, Map.of()); + Model m = new Model("m_1", 101, 1, 1, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -195,7 +195,7 @@ public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 101, 1, 1, Map.of("n_1", 1)); + Model m = new Model("m_1", 101, 1, 1, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -204,7 +204,7 @@ public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { public void testCanAssign_GivenEnoughMemory() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 100, 3, 2, Map.of()); + Model m = new Model("m_1", 100, 3, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -219,13 +219,13 @@ public void testCompareTo_GivenDifferenceInPreviousAssignments() { Node n = new Node("n_1", 100, 5); { - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 2)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 2), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planSatisfyingPreviousAssignments = builder.build(); } { - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 3)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 3), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planNotSatisfyingPreviousAssignments = builder.build(); @@ -239,7 +239,7 @@ public void testCompareTo_GivenDifferenceInAllocations() { AssignmentPlan planWithMoreAllocations; AssignmentPlan planWithFewerAllocations; Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1), 0); { AssignmentPlan.Builder builder = 
AssignmentPlan.builder(List.of(n), List.of(m)); @@ -262,13 +262,13 @@ public void testCompareTo_GivenDifferenceInMemory() { Node n = new Node("n_1", 100, 5); { - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingMoreMemory = builder.build(); } { - Model m = new Model("m_1", 99, 3, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 99, 3, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingLessMemory = builder.build(); @@ -281,9 +281,9 @@ public void testCompareTo_GivenDifferenceInMemory() { public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 50, 1, 2, Map.of()); - Model model2 = new Model("m_2", 30, 2, 1, Map.of()); - Model model3 = new Model("m_3", 20, 4, 1, Map.of()); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 0); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3)) .assignModelToNode(model1, node1, 1) .assignModelToNode(model2, node2, 2) @@ -296,9 +296,9 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 50, 1, 2, Map.of()); - Model model2 = new Model("m_2", 30, 2, 1, Map.of()); - Model model3 = new Model("m_3", 20, 4, 1, Map.of()); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 0); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3)) .assignModelToNode(model1, node1, 1) .assignModelToNode(model2, node2, 2) @@ -307,4 +307,42 @@ public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { .build(); assertThat(plan.satisfiesAllModels(), is(false)); } + + public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { + Node node1 = new Node("n_1", 100, 4); + Node node2 = new Node("n_2", 100, 4); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 3); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 4); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 0); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3)) + .assignModelToNode(model1, node1, 1) + .assignModelToNode(model2, node2, 1) + .build(); + assertThat(plan.arePreviouslyAssignedModelsAssigned(), is(true)); + } + + public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { + Node node1 = new Node("n_1", 100, 4); + Node node2 = new Node("n_2", 100, 4); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 3); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 4); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2)) + .assignModelToNode(model1, node1, 1) + .build(); + assertThat(plan.arePreviouslyAssignedModelsAssigned(), is(false)); + } + + public void testCountPreviouslyAssignedThatAreStillAssigned() { + Node node1 = new Node("n_1", 100, 4); + Node node2 = new 
Node("n_2", 100, 4); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 3); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 4); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 1); + Model model4 = new Model("m_4", 20, 4, 1, Map.of(), 0); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3, model4)) + .assignModelToNode(model1, node1, 1) + .assignModelToNode(model2, node2, 1) + .build(); + assertThat(plan.countPreviouslyAssignedModelsThatAreStillAssigned(), equalTo(2L)); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java index 3db1989d03bd0..3ec3f8cece8eb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java @@ -14,16 +14,19 @@ import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -32,14 +35,14 @@ public class AssignmentPlannerTests extends ESTestCase { public void testModelThatDoesNotFitInMemory() { List nodes = List.of(new Node("n_1", 100, 4)); - Model model = new Model("m_1", 101, 4, 1, Map.of()); + Model model = new Model("m_1", 101, 4, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(model)).computePlan(); assertThat(plan.assignments(model).isEmpty(), is(true)); } public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { List nodes = List.of(new Node("n_1", 100, 4), new Node("n_2", 100, 5)); - Model model = new Model("m_1", 1, 1, 6, Map.of()); + Model model = new Model("m_1", 1, 1, 6, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(model)).computePlan(); assertThat(plan.assignments(model).isEmpty(), is(true)); } @@ -47,19 +50,19 @@ public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { public void testSingleModelThatFitsFullyOnSingleNode() { { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 100, 1, 1, Map.of()); + Model model = new Model("m_1", 100, 1, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(model)).computePlan(); assertModelFullyAssignedToNode(plan, model, node); } { Node node = new Node("n_1", 1000, 8); - Model model = new Model("m_1", 1000, 8, 1, Map.of()); + Model model = new Model("m_1", 1000, 8, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(model)).computePlan(); assertModelFullyAssignedToNode(plan, model, node); } { Node node = new Node("n_1", 10000, 16); - Model model = new Model("m_1", 10000, 1, 16, Map.of()); + Model model = new Model("m_1", 10000, 1, 16, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), 
List.of(model)).computePlan(); assertModelFullyAssignedToNode(plan, model, node); } @@ -68,7 +71,7 @@ public void testSingleModelThatFitsFullyOnSingleNode() { public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFullyAssignedOnOneNode() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model = new Model("m_1", 100, 4, 1, Map.of()); + Model model = new Model("m_1", 100, 4, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(model)).computePlan(); @@ -81,7 +84,7 @@ public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFully } public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerAllocation() { - Model model = new Model("m_1", 30, 10, 1, Map.of()); + Model model = new Model("m_1", 30, 10, 1, Map.of(), 0); // Single node { Node node = new Node("n_1", 100, 4); @@ -119,10 +122,10 @@ public void testMultipleModelsAndNodesWithSingleSolution() { Node node2 = new Node("n_2", 100, 7); Node node3 = new Node("n_3", 100, 2); Node node4 = new Node("n_4", 100, 2); - Model model1 = new Model("m_1", 50, 2, 4, Map.of()); - Model model2 = new Model("m_2", 50, 2, 3, Map.of()); - Model model3 = new Model("m_3", 50, 1, 2, Map.of()); - Model model4 = new Model("m_4", 50, 2, 1, Map.of()); + Model model1 = new Model("m_1", 50, 2, 4, Map.of(), 0); + Model model2 = new Model("m_2", 50, 2, 3, Map.of(), 0); + Model model3 = new Model("m_3", 50, 1, 2, Map.of(), 0); + Model model4 = new Model("m_4", 50, 2, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2, node3, node4), List.of(model1, model2, model3, model4)) .computePlan(); @@ -168,7 +171,7 @@ public void testMultipleModelsAndNodesWithSingleSolution() { } public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerAllocation() { - Model model = new Model("m_1", 30, 10, 3, Map.of()); + Model model = new Model("m_1", 30, 10, 3, Map.of(), 0); // Single node { Node node = new Node("n_1", 100, 4); @@ -203,7 +206,7 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 30, 4, 1, Map.of("n_1", 4)); + Model model = new Model("m_1", 30, 4, 1, Map.of("n_1", 4), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(model)).computePlan(); assertThat(plan.assignments(model).isPresent(), is(true)); @@ -220,18 +223,18 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation() { new Node("n_6", ByteSizeValue.ofGb(8).getBytes(), 16) ); List models = List.of( - new Model("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5)), - new Model("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2)), - new Model("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of()), - new Model("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2)), - new Model("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of()), - new Model("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of()), - new Model("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6)), - new Model("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of()), - new Model("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of()), - new Model("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of()), - new Model("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of()), - new Model("m_12", 
ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of()) + new Model("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0), + new Model("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0), + new Model("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of(), 0), + new Model("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0), + new Model("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0), + new Model("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of(), 0), + new Model("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0), + new Model("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0), + new Model("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0), + new Model("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0), + new Model("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0), + new Model("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0) ); AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, models).computePlan(); @@ -330,7 +333,9 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode Map previousAssignments = assignments.entrySet() .stream() .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)); - previousModelsPlusNew.add(new Model(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments)); + previousModelsPlusNew.add( + new Model(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments, 0) + ); } previousModelsPlusNew.add(randomModel("new")); @@ -343,8 +348,8 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss Node node1 = new Node("n_1", ByteSizeValue.ofGb(2).getBytes(), 2); Node node2 = new Node("n_2", ByteSizeValue.ofGb(2).getBytes(), 2); Node node3 = new Node("n_3", ByteSizeValue.ofGb(2).getBytes(), 2); - Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1)); - Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of()); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), 0); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(model1, model2)).computePlan(); { assertThat(assignmentPlan.assignments(model1).isPresent(), is(true)); @@ -362,6 +367,131 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss } } + public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously() { + Node node1 = new Node("n_1", ByteSizeValue.ofGb(4).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofGb(4).getBytes(), 2); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), 3); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 2, Map.of(), 1); + + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(model1, model2)).computePlan(); + + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + } + + public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() { + Node node1 = new Node("n_1", 
ByteSizeValue.ofGb(2).getBytes(), 2); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1); + + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1), List.of(model1, model2)).computePlan(); + + assertThat(assignmentPlan.countPreviouslyAssignedModelsThatAreStillAssigned(), equalTo(1L)); + } + + public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); + Model model3 = new Model("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0); + + // First only start m_1 + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(model1)).computePlan(); + + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + + // Then start m_2 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(model2)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + + // Then start m_3 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(model3)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); + + // Now the cluster starts getting resized. + Node node3 = new Node("n_3", ByteSizeValue.ofMb(2400).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(2400).getBytes(), 2); + + // First, one node goes away. + assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + + // Then, a node double in memory size is added. + assignmentPlan = new AssignmentPlanner(List.of(node1, node3), createModelsFromPlan(assignmentPlan)).computePlan(); + // And another. 
+ assignmentPlan = new AssignmentPlanner(List.of(node1, node3, node4), createModelsFromPlan(assignmentPlan)).computePlan(); + // Finally, the remaining smaller node is removed + assignmentPlan = new AssignmentPlanner(List.of(node3, node4), createModelsFromPlan(assignmentPlan)).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1").values().stream().mapToInt(Integer::intValue).sum(), greaterThanOrEqualTo(1)); + assertThat(indexedBasedPlan.get("m_2").values().stream().mapToInt(Integer::intValue).sum(), greaterThanOrEqualTo(1)); + assertThat(indexedBasedPlan.get("m_3").values().stream().mapToInt(Integer::intValue).sum(), greaterThanOrEqualTo(1)); + + // Assert that all cores are utilized + assertThat(assignmentPlan.getRemainingNodeCores("n_1"), equalTo(0)); + assertThat(assignmentPlan.getRemainingNodeCores("n_2"), equalTo(0)); + } + + private static List createModelsFromPlan(AssignmentPlan plan) { + List models = new ArrayList<>(); + for (Model m : plan.models()) { + Optional> assignments = plan.assignments(m); + Map currentAllocations = Map.of(); + if (assignments.isPresent()) { + currentAllocations = new HashMap<>(); + for (Map.Entry nodeAssignments : assignments.get().entrySet()) { + currentAllocations.put(nodeAssignments.getKey().id(), nodeAssignments.getValue()); + } + } + int totalAllocations = currentAllocations.values().stream().mapToInt(Integer::intValue).sum(); + models.add( + new Model( + m.id(), + m.memoryBytes(), + m.allocations(), + m.threadsPerAllocation(), + currentAllocations, + Math.max(m.maxAssignedAllocations(), totalAllocations) + ) + ); + } + return models; + } + + private static Map> convertToIdIndexed(AssignmentPlan plan) { + Map> result = new HashMap<>(); + for (Model m : plan.models()) { + Optional> assignments = plan.assignments(m); + Map allocationsPerNodeId = assignments.isPresent() ? 
new HashMap<>() : Map.of(); + for (Map.Entry nodeAssignments : assignments.orElse(Map.of()).entrySet()) { + allocationsPerNodeId.put(nodeAssignments.getKey().id(), nodeAssignments.getValue()); + } + result.put(m.id(), allocationsPerNodeId); + } + return result; + } + private static void assertModelFullyAssignedToNode(AssignmentPlan plan, Model m, Node n) { Optional> assignments = plan.assignments(m); assertThat(assignments.isPresent(), is(true)); @@ -395,12 +525,14 @@ private List randomModels(int scale, double load, List nodes) { } private static Model randomModel(String idSuffix) { + int allocations = randomIntBetween(1, 32); return new Model( "m_" + idSuffix, randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(10).getBytes()), randomIntBetween(1, 32), randomIntBetween(1, 4), - Map.of() + Map.of(), + 0 ); } @@ -417,7 +549,7 @@ private static void assertPreviousAssignmentsAreSatisfied(List models, As allocations += e.getValue(); } assertThat(m.currentAllocationsByNodeId().keySet(), everyItem(in(assignedNodeIds))); - assertThat(allocations, greaterThanOrEqualTo(m.getPreviouslyAssignedAllocations())); + assertThat(allocations, greaterThanOrEqualTo(m.getCurrentAssignedAllocations())); } } @@ -428,7 +560,7 @@ private void runTooManyNodesAndModels(int nodesSize, int modelsSize) { } List models = new ArrayList<>(); for (int i = 0; i < modelsSize; i++) { - models.add(new Model("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of())); + models.add(new Model("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0)); } // Check plan is computed without OOM exception diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java index 7add808f37978..8a798b4e469ae 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java @@ -24,8 +24,8 @@ public class PreserveAllAllocationsTests extends ESTestCase { public void testGivenNoPreviousAssignments() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 30, 2, 1, Map.of()); - Model model2 = new Model("m_2", 30, 2, 4, Map.of()); + Model model1 = new Model("m_1", 30, 2, 1, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 4, Map.of(), 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); @@ -38,8 +38,8 @@ public void testGivenNoPreviousAssignments() { public void testGivenPreviousAssignments() { Node node1 = new Node("n_1", 100, 8); Node node2 = new Node("n_2", 100, 8); - Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1)); - Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2)); + Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1), 1); + Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); @@ -86,7 +86,7 @@ public void testGivenPreviousAssignments() { 
public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2)); + Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2), 2); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node), List.of(model)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(model)).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java index 7c8ea92cd8d49..655f8a6ecf05a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java @@ -24,8 +24,8 @@ public class PreserveOneAllocationTests extends ESTestCase { public void testGivenNoPreviousAssignments() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 30, 2, 1, Map.of()); - Model model2 = new Model("m_2", 30, 2, 4, Map.of()); + Model model1 = new Model("m_1", 30, 2, 1, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 4, Map.of(), 0); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); @@ -38,8 +38,8 @@ public void testGivenNoPreviousAssignments() { public void testGivenPreviousAssignments() { Node node1 = new Node("n_1", 100, 8); Node node2 = new Node("n_2", 100, 8); - Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1)); - Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2)); + Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1), 1); + Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); @@ -87,7 +87,7 @@ public void testGivenPreviousAssignments() { public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2)); + Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2), 2); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(model)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(model)).build(); From 6f62d5bd4acddc7bb8cbf766769a91f5b4202a1b Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 3 Aug 2022 11:49:38 +0200 Subject: [PATCH 074/265] Refactor authentication handling for grant actions (#88944) This PR establishes a cleaner contract between `TransportGrantAction` and its sub-classes: it instruments checking grant authentication instead of requiring this of sub-classes, and allows these to override a handler method for successfully granted authentication. 
Closes #88636 --- .../security/action/TransportGrantAction.java | 28 +++++++++++++------ .../TransportBaseUpdateApiKeyAction.java | 2 +- .../apikey/TransportGrantApiKeyAction.java | 28 ++++++++----------- .../TransportActivateProfileAction.java | 15 +++++----- 4 files changed, 41 insertions(+), 32 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java index 358b2c73a70ee..2ae1e7b89025c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.GrantRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; @@ -49,18 +50,19 @@ public TransportGrantAction( this.threadContext = threadContext; } - protected void executeWithGrantAuthentication(GrantRequest grantRequest, ActionListener listener) { + @Override + public final void doExecute(Task task, Request request, ActionListener listener) { try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - final AuthenticationToken authenticationToken = grantRequest.getGrant().getAuthenticationToken(); + final AuthenticationToken authenticationToken = request.getGrant().getAuthenticationToken(); assert authenticationToken != null : "authentication token must not be null"; if (authenticationToken == null) { listener.onFailure( - new ElasticsearchSecurityException("the grant type [{}] is not supported", grantRequest.getGrant().getType()) + new ElasticsearchSecurityException("the grant type [{}] is not supported", request.getGrant().getType()) ); return; } - final String runAsUsername = grantRequest.getGrant().getRunAsUsername(); + final String runAsUsername = request.getGrant().getRunAsUsername(); final ActionListener authenticationListener = ActionListener.wrap(authentication -> { if (authentication.isRunAs()) { @@ -73,12 +75,15 @@ protected void executeWithGrantAuthentication(GrantRequest grantRequest, ActionL } else { // Authentication can be run-as even when runAsUsername is null. // This can happen when the authentication itself is a run-as client-credentials token. 
- assert runAsUsername != null || "access_token".equals(grantRequest.getGrant().getType()); + assert runAsUsername != null || "access_token".equals(request.getGrant().getType()); authorizationService.authorize( authentication, AuthenticateAction.NAME, new AuthenticateRequest(effectiveUsername), - ActionListener.wrap(ignore2 -> listener.onResponse(authentication), listener::onFailure) + ActionListener.wrap( + ignore2 -> doExecuteWithGrantAuthentication(task, request, authentication, listener), + listener::onFailure + ) ); } } else { @@ -88,7 +93,7 @@ protected void executeWithGrantAuthentication(GrantRequest grantRequest, ActionL new ElasticsearchStatusException("the provided grant credentials do not support run-as", RestStatus.BAD_REQUEST) ); } else { - listener.onResponse(authentication); + doExecuteWithGrantAuthentication(task, request, authentication, listener); } } }, listener::onFailure); @@ -98,7 +103,7 @@ protected void executeWithGrantAuthentication(GrantRequest grantRequest, ActionL } authenticationService.authenticate( actionName, - grantRequest, + request, authenticationToken, ActionListener.runBefore(authenticationListener, authenticationToken::clearCredentials) ); @@ -106,4 +111,11 @@ protected void executeWithGrantAuthentication(GrantRequest grantRequest, ActionL listener.onFailure(e); } } + + protected abstract void doExecuteWithGrantAuthentication( + Task task, + Request request, + Authentication authentication, + ActionListener listener + ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java index d0e00970a9d4a..978c71e6601bb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java @@ -45,7 +45,7 @@ protected TransportBaseUpdateApiKeyAction( } @Override - protected void doExecute(Task task, Request request, ActionListener listener) { + public final void doExecute(Task task, Request request, ActionListener listener) { final var authentication = securityContext.getAuthentication(); if (authentication == null) { listener.onFailure(new IllegalStateException("authentication is required")); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java index 583f1a79efdf0..a6401053634b2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.security.action.TransportGrantAction; import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.AuthenticationService; @@ -78,22 +79,17 @@ public 
TransportGrantApiKeyAction( } @Override - protected void doExecute(Task task, GrantApiKeyRequest request, ActionListener listener) { - executeWithGrantAuthentication( - request, - listener.delegateFailure( - (l, authentication) -> resolver.resolveUserRoleDescriptors( - authentication, - ActionListener.wrap( - roleDescriptors -> apiKeyService.createApiKey( - authentication, - request.getApiKeyRequest(), - roleDescriptors, - listener - ), - listener::onFailure - ) - ) + protected void doExecuteWithGrantAuthentication( + Task task, + GrantApiKeyRequest request, + Authentication authentication, + ActionListener listener + ) { + resolver.resolveUserRoleDescriptors( + authentication, + ActionListener.wrap( + roleDescriptors -> apiKeyService.createApiKey(authentication, request.getApiKeyRequest(), roleDescriptors, listener), + listener::onFailure ) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportActivateProfileAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportActivateProfileAction.java index 6014b8d04adce..d7241011d9c09 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportActivateProfileAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportActivateProfileAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileRequest; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.security.action.TransportGrantAction; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authz.AuthorizationService; @@ -47,12 +48,12 @@ public TransportActivateProfileAction( } @Override - protected void doExecute(Task task, ActivateProfileRequest request, ActionListener listener) { - executeWithGrantAuthentication( - request, - listener.delegateFailure( - (l, authentication) -> profileService.activateProfile(authentication, l.map(ActivateProfileResponse::new)) - ) - ); + protected void doExecuteWithGrantAuthentication( + Task task, + ActivateProfileRequest request, + Authentication authentication, + ActionListener listener + ) { + profileService.activateProfile(authentication, listener.map(ActivateProfileResponse::new)); } } From ec8af834f48896b1da44d7b5ffe160e62f06e038 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Wed, 3 Aug 2022 11:02:27 +0100 Subject: [PATCH 075/265] Write docs about our Docker build setup (#89036) Add a README.md to the docker distribution project, with details about how we build and test our images. --- distribution/docker/README.md | 132 +++++++++++++++++++++++++++++++ distribution/docker/build.gradle | 4 +- 2 files changed, 134 insertions(+), 2 deletions(-) create mode 100644 distribution/docker/README.md diff --git a/distribution/docker/README.md b/distribution/docker/README.md new file mode 100644 index 0000000000000..2e22fe099f4f5 --- /dev/null +++ b/distribution/docker/README.md @@ -0,0 +1,132 @@ +# Elasticsearch Docker Distribution + +The ES build can generate several types of Docker image. These are enumerated in +the [DockerBase] enum. 
+ + * Default - this is what most people use, and is based on Ubuntu + * UBI - the same as the default image, but based upon [RedHat's UBI + images][ubi], specifically their minimal flavour. + * Iron Bank - this is the US Department of Defence's repository of digitally + signed, binary container images including both Free and Open-Source + software (FOSS) and Commercial off-the-shelf (COTS). In practice, this is + another UBI build, this time on the regular UBI image, with extra + hardening. See below for more details. + * Cloud - this is mostly the same as the default image, with some notable differences: + * `filebeat` and `metricbeat` are included + * `wget` is included + * The `ENTRYPOINT` is just `/bin/tini`, and the `CMD` is + `/app/elasticsearch.sh`. In normal use this file would be bind-mounted + in, but the image ships a stub version of this file so that the image + can still be tested. + * Cloud ESS - this directly extends the Cloud image, and adds all ES plugins + that the ES build generates in an archive directory. It also sets an + environment variable that points at this directory. This allows plugins to + be installed from the archive instead of the internet, speeding up + deployment times. + +The long-term goal is for both Cloud images to be retired in favour of the +default image. + + +## Build strategy + +For all image flavours, the ES build implements a pipeline: + + 1. Construct a Docker build context + 2. Transform the build context so that it is possible to build it locally + 3. Build the Docker image locally + +Some images use (1) as the releasable artifact, some use (3). + +**NOTE:** "Pipeline" isn't actually the correct term - in reality, each Gradle +task depends on the one before it. Notably, it is the transform tasks that +depend on a locally built `.tar.gz` Elasticsearch archive. + + +## Releasing on Docker Hub + +Elasticsearch is an [official image on Docker +Hub](https://hub.docker.com/_/elasticsearch). On release day, we build the ES +Docker image and upload it to [Elastic's Docker +registry](https://www.docker.elastic.co/). Separately, we submit a build context +to Docker via the [elastic/dockerfiles](https://github.com/elastic/dockerfiles) +repository. Docker then builds the image, and uploads it to Docker Hub. +Unfortunately, this is an asynchronous process, and we don't hear back if +there's a failure, so even when everything works, there's a lag between +releasing a new version of Elasticsearch, and the image being available on +Docker Hub. + +Being an official image puts additional constraints on how the Elasticsearch +image is built. + + * It must extend another official image + * It must fetch any required artifacts - they cannot be supplied in the build + context. + * It must be platform-independent i.e. it can build on ARM and x64 + +The transform step in the [build strategy](#build-strategy) above replaces the +`curl` command in the `Dockerfile` that fetches an Elasticsearch `.tar.gz` +distribution with a `COPY` command, so that it is possible to build the ES image +locally. + +## Iron Bank release process + +Elastic does not release an Iron Bank image. Rather, for each release we provide +a Docker build context, and Iron Bank build the image themselves using a custom +build process. + +The ES build still has a task to build an Iron Bank image, in order to test +something close to what Iron Bank build.
The ES build does this by transforming +the files in the Docker build context slightly, and passing usable values for +the build variables (we use the regular UBI image instead of the DoD one). + +The important takeaway here is that the releasable artifact is the Iron Bank +build context, not the image. + + +## Multi-architecture images + +We publish [multi-architecture images][multi-arch], for use on both +`x86_64` (Intel) and `aarch64` (ARM). This works by essentially building two +images, and combining them with a Docker manifest. The Elasticsearch Delivery +team aren't responsible for this - rather, it happens during our unified release +process. + + +## Testing + +We have a suite of tests in the [qa/os](../../qa/os) subproject. Most of the +Docker tests are in the [DockerTests] class, but there are tests that use Docker +in other test classes. + +The tests are mostly concerned with ensuring that the image has been built +correctly e.g. contents and permissions are correct. We also check that the +custom behaviour in the +[docker-entrypoint.sh](src/docker/bin/docker-entrypoint.sh) works as intended. + + +## Reliability + +We go to some lengths to try and make the Docker build resilient to transient +network errors. This is why, when browsing the +[Dockerfile](src/docker/Dockerfile), you'll see many commands wrapped in looping +logic, so that if e.g. package installation fails, we try again. We also perform +explicit `docker pull` commands instead of relying on `docker run` to pull an +image down automatically, so that we can wrap the `pull` part in a retry. + + +## What are the export projects for? + +Our integration tests are set up so that the test task depends on the project +that creates the required artifacts. Note, it doesn't depend on a task, but a +project! Also, we used to use Vagrant for testing (this has largely since been +abandoned), which meant we needed to be able to build an image locally, export +it, and load it again inside a Vagrant VM. + +Ideally this import / export stuff should be completely removed. + + +[DockerBase]: ../../build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +[DockerTests]: ../../qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +[multi-arch]: https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/ +[ubi]: https://developers.redhat.com/products/rhel/ubi diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index a1217e391a589..ccb23b554ea84 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -286,8 +286,8 @@ void addTransformDockerContextTask(Architecture architecture, DockerBase base) { from(tarTree("${project.buildDir}/distributions/${archiveName}.tar.gz")) { eachFile { FileCopyDetails details -> if (details.name.equals("Dockerfile")) { - filter { String filename -> - return filename.replaceAll('^RUN curl.*artifacts-no-kpi.*$', "COPY ${distributionName} /tmp/elasticsearch.tar.gz") + filter { String contents -> + return contents.replaceAll('^RUN curl.*artifacts-no-kpi.*$', "COPY ${distributionName} /tmp/elasticsearch.tar.gz") } } } From 92852495335f7d065a022ce61ade835fb7cf8a4d Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Wed, 3 Aug 2022 11:15:50 +0100 Subject: [PATCH 076/265] Wrap code in new tracing contexts where required (#88920) Part of #84369. Split out from #88443. This PR wraps parts of the code in a new tracing context.
This is necessary so that a tracing implementation can use the thread context to propagate tracing headers, but without the code attempting to set the same key twice in the thread context, which is illegal. In order to avoid future diff noise, the wrapped code has mostly been refactored into methods. Note that in some places we actually clear the tracing context completely. This is done where the operation to be performed should have no association with the current trace context. For example, when creating a new index via a REST request, the resulting background tasks for the index should not be associated with the REST request in perpetuity. --- .../cluster/InternalClusterInfoService.java | 80 +++--- .../cluster/service/MasterService.java | 255 +++++++++--------- .../common/util/concurrent/ThreadContext.java | 28 +- .../org/elasticsearch/index/IndexService.java | 12 +- .../RetentionLeaseBackgroundSyncAction.java | 80 +++--- .../index/seqno/RetentionLeaseSyncAction.java | 66 ++--- .../index/shard/PrimaryReplicaSyncer.java | 29 ++ .../PersistentTasksNodeService.java | 14 + .../persistent/StartPersistentTaskAction.java | 1 + .../transport/InboundHandler.java | 47 ++-- .../transport/TransportService.java | 14 +- .../PersistentTasksNodeServiceTests.java | 29 +- 12 files changed, 387 insertions(+), 268 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 71129b0ba0b37..ba9d87e7d1d6c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -176,42 +176,16 @@ void execute() { assert countDown.isCountedDown() == false; logger.trace("starting async refresh"); - final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true"); - nodesStatsRequest.clear(); - nodesStatsRequest.addMetric(NodesStatsRequest.Metric.FS.metricName()); - nodesStatsRequest.timeout(fetchTimeout); - client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.runAfter(new ActionListener<>() { - @Override - public void onResponse(NodesStatsResponse nodesStatsResponse) { - logger.trace("received node stats response"); - - for (final FailedNodeException failure : nodesStatsResponse.failures()) { - logger.warn(() -> "failed to retrieve stats for node [" + failure.nodeId() + "]", failure.getCause()); - } - - Map leastAvailableUsagesBuilder = new HashMap<>(); - Map mostAvailableUsagesBuilder = new HashMap<>(); - fillDiskUsagePerNode( - adjustNodesStats(nodesStatsResponse.getNodes()), - leastAvailableUsagesBuilder, - mostAvailableUsagesBuilder - ); - leastAvailableSpaceUsages = Map.copyOf(leastAvailableUsagesBuilder); - mostAvailableSpaceUsages = Map.copyOf(mostAvailableUsagesBuilder); - } + try (var ignored = threadPool.getThreadContext().clearTraceContext()) { + fetchNodeStats(); + } - @Override - public void onFailure(Exception e) { - if (e instanceof ClusterBlockException) { - logger.trace("failed to retrieve node stats", e); - } else { - logger.warn("failed to retrieve node stats", e); - } - leastAvailableSpaceUsages = Map.of(); - mostAvailableSpaceUsages = Map.of(); - } - }, this::onStatsProcessed)); + try (var ignored = threadPool.getThreadContext().clearTraceContext()) { + fetchIndicesStats(); + } + } + private void fetchIndicesStats() { final IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); indicesStatsRequest.clear(); 
indicesStatsRequest.store(true); @@ -289,6 +263,44 @@ public void onFailure(Exception e) { }, this::onStatsProcessed)); } + private void fetchNodeStats() { + final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true"); + nodesStatsRequest.clear(); + nodesStatsRequest.addMetric(NodesStatsRequest.Metric.FS.metricName()); + nodesStatsRequest.timeout(fetchTimeout); + client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.runAfter(new ActionListener<>() { + @Override + public void onResponse(NodesStatsResponse nodesStatsResponse) { + logger.trace("received node stats response"); + + for (final FailedNodeException failure : nodesStatsResponse.failures()) { + logger.warn(() -> "failed to retrieve stats for node [" + failure.nodeId() + "]", failure.getCause()); + } + + Map leastAvailableUsagesBuilder = new HashMap<>(); + Map mostAvailableUsagesBuilder = new HashMap<>(); + fillDiskUsagePerNode( + adjustNodesStats(nodesStatsResponse.getNodes()), + leastAvailableUsagesBuilder, + mostAvailableUsagesBuilder + ); + leastAvailableSpaceUsages = Map.copyOf(leastAvailableUsagesBuilder); + mostAvailableSpaceUsages = Map.copyOf(mostAvailableUsagesBuilder); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ClusterBlockException) { + logger.trace("failed to retrieve node stats", e); + } else { + logger.warn("failed to retrieve node stats", e); + } + leastAvailableSpaceUsages = Map.of(); + mostAvailableSpaceUsages = Map.of(); + } + }, this::onStatsProcessed)); + } + private void onStatsProcessed() { if (countDown.countDown()) { logger.trace("stats all received, computing cluster info and notifying listeners"); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 922b39ac71fd9..23dd0d14e1fea 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -283,144 +283,157 @@ private void runTasks( logExecutionTime(executionTime, "notify listeners on unchanged cluster state", summary); clusterStateUpdateStatsTracker.onUnchangedClusterState(computationTime.millis(), executionTime.millis()); } else { - final Task task = taskManager.register("master", STATE_UPDATE_ACTION_NAME, new TaskAwareRequest() { - @Override - public void setParentTask(TaskId taskId) {} + try (var ignored = threadPool.getThreadContext().newTraceContext()) { + publishClusterStateUpdate(executor, summary, previousClusterState, executionResults, newClusterState, computationTime); + } + } + } - @Override - public TaskId getParentTask() { - return TaskId.EMPTY_TASK_ID; - } + private void publishClusterStateUpdate( + ClusterStateTaskExecutor executor, + BatchSummary summary, + ClusterState previousClusterState, + List> executionResults, + ClusterState newClusterState, + TimeValue computationTime + ) { + final Task task = taskManager.register("master", STATE_UPDATE_ACTION_NAME, new TaskAwareRequest() { + @Override + public void setParentTask(TaskId taskId) {} - @Override - public String getDescription() { - return "publication of cluster state [" + newClusterState.getVersion() + "]"; - } - }); + @Override + public TaskId getParentTask() { + return TaskId.EMPTY_TASK_ID; + } + + @Override + public String getDescription() { + return "publication of cluster state [" + newClusterState.getVersion() + "]"; + } + }); + try { + if (logger.isTraceEnabled()) { + logger.trace("cluster 
state updated, source [{}]\n{}", summary, newClusterState); + } else { + logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), summary); + } + final long publicationStartTime = threadPool.rawRelativeTimeInMillis(); try { - if (logger.isTraceEnabled()) { - logger.trace("cluster state updated, source [{}]\n{}", summary, newClusterState); - } else { - logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), summary); + final ClusterStatePublicationEvent clusterStatePublicationEvent = new ClusterStatePublicationEvent( + summary, + previousClusterState, + newClusterState, + task, + computationTime.millis(), + publicationStartTime + ); + + // new cluster state, notify all listeners + final DiscoveryNodes.Delta nodesDelta = newClusterState.nodes().delta(previousClusterState.nodes()); + if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { + String nodesDeltaSummary = nodesDelta.shortSummary(); + if (nodesDeltaSummary.length() > 0) { + logger.info( + "{}, term: {}, version: {}, delta: {}", + summary, + newClusterState.term(), + newClusterState.version(), + nodesDeltaSummary + ); + } } - final long publicationStartTime = threadPool.rawRelativeTimeInMillis(); - try { - final ClusterStatePublicationEvent clusterStatePublicationEvent = new ClusterStatePublicationEvent( - summary, - previousClusterState, - newClusterState, - task, - computationTime.millis(), - publicationStartTime - ); - - // new cluster state, notify all listeners - final DiscoveryNodes.Delta nodesDelta = newClusterState.nodes().delta(previousClusterState.nodes()); - if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { - String nodesDeltaSummary = nodesDelta.shortSummary(); - if (nodesDeltaSummary.length() > 0) { - logger.info( - "{}, term: {}, version: {}, delta: {}", - summary, - newClusterState.term(), - newClusterState.version(), - nodesDeltaSummary + + logger.debug("publishing cluster state version [{}]", newClusterState.version()); + // initialize routing nodes and the indices lookup concurrently, we will need both of them for the cluster state + // application and can compute them while we wait for the other nodes during publication + newClusterState.initializeAsync(threadPool.generic()); + publish( + clusterStatePublicationEvent, + new CompositeTaskAckListener( + executionResults.stream() + .map(ExecutionResult::getContextPreservingAckListener) + .filter(Objects::nonNull) + .map( + contextPreservingAckListener -> new TaskAckListener( + contextPreservingAckListener, + newClusterState.version(), + newClusterState.nodes(), + threadPool + ) + ) + .toList() + ), + new ActionListener<>() { + @Override + public void onResponse(Void unused) { + final long notificationStartTime = threadPool.rawRelativeTimeInMillis(); + for (final var executionResult : executionResults) { + executionResult.onPublishSuccess(newClusterState); + } + + try { + executor.clusterStatePublished(newClusterState); + } catch (Exception e) { + logger.error( + () -> format( + "exception thrown while notifying executor of new cluster state publication [%s]", + summary + ), + e + ); + } + final TimeValue executionTime = getTimeSince(notificationStartTime); + logExecutionTime( + executionTime, + "notify listeners on successful publication of cluster state (version: " + + newClusterState.version() + + ", uuid: " + + newClusterState.stateUUID() + + ')', + summary + ); + clusterStateUpdateStatsTracker.onPublicationSuccess( + threadPool.rawRelativeTimeInMillis(), + clusterStatePublicationEvent, 
+ executionTime.millis() ); } - } - logger.debug("publishing cluster state version [{}]", newClusterState.version()); - // initialize routing nodes and the indices lookup concurrently, we will need both of them for the cluster state - // application and can compute them while we wait for the other nodes during publication - newClusterState.initializeAsync(threadPool.generic()); - publish( - clusterStatePublicationEvent, - new CompositeTaskAckListener( - executionResults.stream() - .map(ExecutionResult::getContextPreservingAckListener) - .filter(Objects::nonNull) - .map( - contextPreservingAckListener -> new TaskAckListener( - contextPreservingAckListener, - newClusterState.version(), - newClusterState.nodes(), - threadPool - ) - ) - .toList() - ), - new ActionListener<>() { - @Override - public void onResponse(Void unused) { + @Override + public void onFailure(Exception exception) { + if (exception instanceof FailedToCommitClusterStateException failedToCommitClusterStateException) { final long notificationStartTime = threadPool.rawRelativeTimeInMillis(); + final long version = newClusterState.version(); + logger.warn( + () -> format("failing [%s]: failed to commit cluster state version [%s]", summary, version), + exception + ); for (final var executionResult : executionResults) { - executionResult.onPublishSuccess(newClusterState); + executionResult.onPublishFailure(failedToCommitClusterStateException); } - - try { - executor.clusterStatePublished(newClusterState); - } catch (Exception e) { - logger.error( - () -> format( - "exception thrown while notifying executor of new cluster state publication [%s]", - summary - ), - e - ); - } - final TimeValue executionTime = getTimeSince(notificationStartTime); - logExecutionTime( - executionTime, - "notify listeners on successful publication of cluster state (version: " - + newClusterState.version() - + ", uuid: " - + newClusterState.stateUUID() - + ')', - summary + final long notificationMillis = threadPool.rawRelativeTimeInMillis() - notificationStartTime; + clusterStateUpdateStatsTracker.onPublicationFailure( + threadPool.rawRelativeTimeInMillis(), + clusterStatePublicationEvent, + notificationMillis ); - clusterStateUpdateStatsTracker.onPublicationSuccess( + } else { + assert publicationMayFail() : exception; + clusterStateUpdateStatsTracker.onPublicationFailure( threadPool.rawRelativeTimeInMillis(), clusterStatePublicationEvent, - executionTime.millis() + 0L ); - } - - @Override - public void onFailure(Exception exception) { - if (exception instanceof FailedToCommitClusterStateException failedToCommitClusterStateException) { - final long notificationStartTime = threadPool.rawRelativeTimeInMillis(); - final long version = newClusterState.version(); - logger.warn( - () -> format("failing [%s]: failed to commit cluster state version [%s]", summary, version), - exception - ); - for (final var executionResult : executionResults) { - executionResult.onPublishFailure(failedToCommitClusterStateException); - } - final long notificationMillis = threadPool.rawRelativeTimeInMillis() - notificationStartTime; - clusterStateUpdateStatsTracker.onPublicationFailure( - threadPool.rawRelativeTimeInMillis(), - clusterStatePublicationEvent, - notificationMillis - ); - } else { - assert publicationMayFail() : exception; - clusterStateUpdateStatsTracker.onPublicationFailure( - threadPool.rawRelativeTimeInMillis(), - clusterStatePublicationEvent, - 0L - ); - handleException(summary, publicationStartTime, newClusterState, exception); - } + handleException(summary, 
publicationStartTime, newClusterState, exception); } } - ); - } catch (Exception e) { - handleException(summary, publicationStartTime, newClusterState, e); - } - } finally { - taskManager.unregister(task); + } + ); + } catch (Exception e) { + handleException(summary, publicationStartTime, newClusterState, e); } + } finally { + taskManager.unregister(task); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 67662894ce907..ecfb4bf60c76e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -532,14 +532,26 @@ public void addResponseHeader(final String key, final String value, final Functi * command has already been passed through this method then it is returned unaltered rather than wrapped twice. */ public Runnable preserveContext(Runnable command) { + return doPreserveContext(command, false); + } + + /** + * Saves the current thread context and wraps command in a Runnable that restores that context before running command. Also + * starts a new tracing context durin executing. If command has already been wrapped then it is returned unaltered. + */ + public Runnable preserveContextWithTracing(Runnable command) { + return doPreserveContext(command, true); + } + + private Runnable doPreserveContext(Runnable command, boolean preserveContext) { if (command instanceof ContextPreservingAbstractRunnable) { return command; } if (command instanceof ContextPreservingRunnable) { return command; } - if (command instanceof AbstractRunnable) { - return new ContextPreservingAbstractRunnable((AbstractRunnable) command); + if (command instanceof AbstractRunnable abstractRunnable) { + return new ContextPreservingAbstractRunnable(abstractRunnable, preserveContext); } return new ContextPreservingRunnable(command); } @@ -821,17 +833,20 @@ public Runnable unwrap() { } /** - * Wraps an AbstractRunnable to preserve the thread context. + * Wraps an AbstractRunnable to preserve the thread context, optionally creating a new trace context before + * executing. */ private class ContextPreservingAbstractRunnable extends AbstractRunnable implements WrappedRunnable { private final AbstractRunnable in; private final ThreadContext.StoredContext creatorsContext; + private final boolean useNewTraceContext; private ThreadContext.StoredContext threadsOriginalContext = null; - private ContextPreservingAbstractRunnable(AbstractRunnable in) { + private ContextPreservingAbstractRunnable(AbstractRunnable in, boolean useNewTraceContext) { creatorsContext = newStoredContext(false); this.in = in; + this.useNewTraceContext = useNewTraceContext; } @Override @@ -864,6 +879,11 @@ public void onRejection(Exception e) { protected void doRun() throws Exception { threadsOriginalContext = stashContext(); creatorsContext.restore(); + if (useNewTraceContext) { + // Discard the return value - we'll restore threadsOriginalContext in `onAfter()`. 
+ // noinspection resource + newTraceContext(); + } in.doRun(); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index efc1075db0084..1d6379c973cf3 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -241,11 +241,13 @@ public IndexService( this.readerWrapper = wrapperFactory.apply(this); this.searchOperationListeners = Collections.unmodifiableList(searchOperationListeners); this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); - // kick off async ops for the first shard in this index - this.refreshTask = new AsyncRefreshTask(this); - this.trimTranslogTask = new AsyncTrimTranslogTask(this); - this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); - this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); + try (var ignored = threadPool.getThreadContext().clearTraceContext()) { + // kick off async ops for the first shard in this index + this.refreshTask = new AsyncRefreshTask(this); + this.trimTranslogTask = new AsyncTrimTranslogTask(this); + this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); + this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); + } updateFsyncTaskIfNecessary(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 950d0e31d9883..c75cbb0308a86 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -99,47 +99,53 @@ final void backgroundSync(ShardId shardId, String primaryAllocationId, long prim // we have to execute under the system context so that if security is enabled the sync is authorized threadContext.markAsSystemContext(); final Request request = new Request(shardId, retentionLeases); - final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_background_sync", request); - transportService.sendChildRequest( - clusterService.localNode(), - transportPrimaryAction, - new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), - task, - transportOptions, - new TransportResponseHandler() { - @Override - public ReplicationResponse read(StreamInput in) throws IOException { - return newResponseInstance(in); - } + try (var ignored = threadContext.newTraceContext()) { + sendRetentionLeaseSyncAction(shardId, primaryAllocationId, primaryTerm, request); + } + } + } - @Override - public void handleResponse(ReplicationResponse response) { - task.setPhase("finished"); - taskManager.unregister(task); - } + private void sendRetentionLeaseSyncAction(ShardId shardId, String primaryAllocationId, long primaryTerm, Request request) { + final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_background_sync", request); + transportService.sendChildRequest( + clusterService.localNode(), + transportPrimaryAction, + new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), + task, + transportOptions, + new TransportResponseHandler() { + @Override + public ReplicationResponse read(StreamInput in) throws IOException { + return newResponseInstance(in); + } - @Override - public void handleException(TransportException e) { - 
task.setPhase("finished"); - taskManager.unregister(task); - if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { - // node shutting down - return; - } - if (ExceptionsHelper.unwrap( - e, - IndexNotFoundException.class, - AlreadyClosedException.class, - IndexShardClosedException.class - ) != null) { - // the index was deleted or the shard is closed - return; - } - getLogger().warn(() -> format("%s retention lease background sync failed", shardId), e); + @Override + public void handleResponse(ReplicationResponse response) { + task.setPhase("finished"); + taskManager.unregister(task); + } + + @Override + public void handleException(TransportException e) { + task.setPhase("finished"); + taskManager.unregister(task); + if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { + // node shutting down + return; + } + if (ExceptionsHelper.unwrap( + e, + IndexNotFoundException.class, + AlreadyClosedException.class, + IndexShardClosedException.class + ) != null) { + // the index was deleted or the shard is closed + return; } + getLogger().warn(() -> format("%s retention lease background sync failed", shardId), e); } - ); - } + } + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 2c6926377a587..86cfa84b90b88 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -111,42 +111,44 @@ final void sync( // we have to execute under the system context so that if security is enabled the sync is authorized threadContext.markAsSystemContext(); final Request request = new Request(shardId, retentionLeases); - final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_sync", request); - transportService.sendChildRequest( - clusterService.localNode(), - transportPrimaryAction, - new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), - task, - transportOptions, - new TransportResponseHandler() { - @Override - public ReplicationResponse read(StreamInput in) throws IOException { - return newResponseInstance(in); - } + try (var ignored = threadContext.newTraceContext()) { + final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_sync", request); + transportService.sendChildRequest( + clusterService.localNode(), + transportPrimaryAction, + new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), + task, + transportOptions, + new TransportResponseHandler() { + @Override + public ReplicationResponse read(StreamInput in) throws IOException { + return newResponseInstance(in); + } - @Override - public void handleResponse(ReplicationResponse response) { - task.setPhase("finished"); - taskManager.unregister(task); - listener.onResponse(response); - } + @Override + public void handleResponse(ReplicationResponse response) { + task.setPhase("finished"); + taskManager.unregister(task); + listener.onResponse(response); + } - @Override - public void handleException(TransportException e) { - if (ExceptionsHelper.unwrap( - e, - IndexNotFoundException.class, - AlreadyClosedException.class, - IndexShardClosedException.class - ) == null) { - getLogger().warn(() -> format("%s retention lease sync failed", shardId), e); + @Override + public void handleException(TransportException e) { + if (ExceptionsHelper.unwrap( + e, + 
IndexNotFoundException.class, + AlreadyClosedException.class, + IndexShardClosedException.class + ) == null) { + getLogger().warn(() -> format("%s retention lease sync failed", shardId), e); + } + task.setPhase("finished"); + taskManager.unregister(task); + listener.onFailure(e); } - task.setPhase("finished"); - taskManager.unregister(task); - listener.onFailure(e); } - } - ); + ); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index cfebd272a78d7..370871424c201 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -162,6 +162,35 @@ private void resync( ) { ResyncRequest request = new ResyncRequest(shardId, primaryAllocationId); final TaskManager taskManager = transportService.getTaskManager(); + + try (var ignored = transportService.getThreadPool().getThreadContext().newTraceContext()) { + doResync( + shardId, + primaryAllocationId, + primaryTerm, + snapshot, + startingSeqNo, + maxSeqNo, + maxSeenAutoIdTimestamp, + listener, + request, + taskManager + ); + } + } + + private void doResync( + ShardId shardId, + String primaryAllocationId, + long primaryTerm, + Translog.Snapshot snapshot, + long startingSeqNo, + long maxSeqNo, + long maxSeenAutoIdTimestamp, + ActionListener listener, + ResyncRequest request, + TaskManager taskManager + ) { ResyncTask resyncTask = (ResyncTask) taskManager.register("transport", "resync", request); // it's not transport :-) ActionListener wrappedListener = new ActionListener() { @Override diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index eb97e5e86b6f5..dd2bfa489e5b8 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -22,6 +22,7 @@ import org.elasticsearch.tasks.TaskAwareRequest; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -42,6 +43,7 @@ public class PersistentTasksNodeService implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(PersistentTasksNodeService.class); + private final ThreadPool threadPool; private final Map runningTasks = new HashMap<>(); private final PersistentTasksService persistentTasksService; private final PersistentTasksExecutorRegistry persistentTasksExecutorRegistry; @@ -49,11 +51,13 @@ public class PersistentTasksNodeService implements ClusterStateListener { private final NodePersistentTasksExecutor nodePersistentTasksExecutor; public PersistentTasksNodeService( + ThreadPool threadPool, PersistentTasksService persistentTasksService, PersistentTasksExecutorRegistry persistentTasksExecutorRegistry, TaskManager taskManager, NodePersistentTasksExecutor nodePersistentTasksExecutor ) { + this.threadPool = threadPool; this.persistentTasksService = persistentTasksService; this.persistentTasksExecutorRegistry = persistentTasksExecutorRegistry; this.taskManager = taskManager; @@ -182,6 +186,16 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, } }; + try (var ignored = 
threadPool.getThreadContext().newTraceContext()) { + doStartTask(taskInProgress, executor, request); + } + } + + private void doStartTask( + PersistentTask taskInProgress, + PersistentTasksExecutor executor, + TaskAwareRequest request + ) { AllocatedPersistentTask task; try { task = (AllocatedPersistentTask) taskManager.register("persistent", taskInProgress.getTaskName() + "[c]", request); diff --git a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java index 822f2af17e054..e5a4c4d5d344b 100644 --- a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java @@ -194,6 +194,7 @@ public TransportAction( NodePersistentTasksExecutor executor = new NodePersistentTasksExecutor(threadPool); clusterService.addListener( new PersistentTasksNodeService( + threadPool, persistentTasksService, persistentTasksExecutorRegistry, transportService.getTaskManager(), diff --git a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java index dad3fee65540e..3ef4b5f6769b0 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java @@ -273,36 +273,39 @@ private void handleRequest(TcpChannel channel, Head } final String executor = reg.getExecutor(); if (ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, transportChannel); - } catch (Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); + try (var ignored = threadPool.getThreadContext().newTraceContext()) { + try { + reg.processMessageReceived(request, transportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), transportChannel, e); + } } } else { boolean success = false; request.incRef(); try { - threadPool.executor(executor).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - reg.processMessageReceived(request, transportChannel); - } + threadPool.executor(executor) + .execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } - @Override - public void onFailure(Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); - } + @Override + public void onFailure(Exception e) { + sendErrorResponse(reg.getAction(), transportChannel, e); + } - @Override - public void onAfter() { - request.decRef(); - } - }); + @Override + public void onAfter() { + request.decRef(); + } + })); success = true; } finally { if (success == false) { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 10d4a959d0744..3a30cbb433506 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -945,16 +945,18 @@ private void sendLocalRequest(long requestId, final String action, final Transpo } final String executor = reg.getExecutor(); if 
(ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, channel); - } catch (Exception e) { - handleSendToLocalException(channel, e, action); + try (var ignored = threadPool.getThreadContext().newTraceContext()) { + try { + reg.processMessageReceived(request, channel); + } catch (Exception e) { + handleSendToLocalException(channel, e, action); + } } } else { boolean success = false; request.incRef(); try { - threadPool.executor(executor).execute(new AbstractRunnable() { + threadPool.executor(executor).execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { @Override protected void doRun() throws Exception { reg.processMessageReceived(request, channel); @@ -979,7 +981,7 @@ public String toString() { public void onAfter() { request.decRef(); } - }); + })); success = true; } finally { if (success == false) { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index dd4b8cbb0eea2..d3d978cef470b 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -109,6 +109,7 @@ public void testStartTask() { MockExecutor executor = new MockExecutor(); PersistentTasksNodeService coordinator = new PersistentTasksNodeService( + threadPool, persistentTasksService, registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), @@ -224,6 +225,7 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { MockExecutor executor = new MockExecutor(); PersistentTasksNodeService coordinator = new PersistentTasksNodeService( + threadPool, persistentTasksService, registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), @@ -285,7 +287,13 @@ public void sendCompletionRequest( int nonLocalNodesCount = randomInt(10); MockExecutor executor = new MockExecutor(); TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); - PersistentTasksNodeService coordinator = new PersistentTasksNodeService(persistentTasksService, registry, taskManager, executor); + PersistentTasksNodeService coordinator = new PersistentTasksNodeService( + threadPool, + persistentTasksService, + registry, + taskManager, + executor + ); ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY); @@ -374,7 +382,13 @@ public void sendCompletionRequest( int nonLocalNodesCount = randomInt(10); MockExecutor executor = new MockExecutor(); TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); - PersistentTasksNodeService coordinator = new PersistentTasksNodeService(persistentTasksService, registry, taskManager, executor); + PersistentTasksNodeService coordinator = new PersistentTasksNodeService( + threadPool, + persistentTasksService, + registry, + taskManager, + executor + ); ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY); @@ -441,10 +455,10 @@ public void testRegisterTaskFails() throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); final Client mockClient = mock(Client.class); - final ThreadPool threadPool = mock(ThreadPool.class); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); - 
when(mockClient.threadPool()).thenReturn(threadPool); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(mockThreadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + when(mockClient.threadPool()).thenReturn(mockThreadPool); when(mockClient.settings()).thenReturn(Settings.EMPTY); PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, mockClient) { @@ -476,9 +490,10 @@ public void sendCompletionRequest( MockExecutor executor = new MockExecutor(); PersistentTasksNodeService coordinator = new PersistentTasksNodeService( + mockThreadPool, persistentTasksService, registry, - new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), + new TaskManager(Settings.EMPTY, mockThreadPool, Collections.emptySet()), executor ); From a124bafe7e175250e1169bfe142233f33479aeae Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 3 Aug 2022 12:42:54 +0200 Subject: [PATCH 077/265] REST tests and spec for bulk update API keys (#89027) This PR adds REST API spec and YAML test files for the BulkUpdateApiKey operation. --- .../api/security.bulk_update_api_keys.json | 32 ++ .../security/authc/ApiKeyIntegTests.java | 94 ++++ .../rest-api-spec/test/api_key/30_update.yml | 517 +++++++++++++++++- 3 files changed, 622 insertions(+), 21 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_update_api_keys.json diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_update_api_keys.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_update_api_keys.json new file mode 100644 index 0000000000000..69d3255776f37 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_update_api_keys.json @@ -0,0 +1,32 @@ +{ + "security.bulk_update_api_keys": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html", + "description": "Updates the attributes of multiple existing API keys." 
+ }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_security/api_key/_bulk_update", + "methods": [ + "POST" + ] + } + ] + }, + "body": { + "description": "The API key request to update the attributes of multiple API keys.", + "required": true + } + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 1dda9e913f434..7cf5fac69fff6 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -1645,6 +1645,90 @@ public void testBulkUpdateApiKeysWithDuplicates() throws ExecutionException, Int assertThat(response.getNoops(), empty()); } + public void testBulkUpdateApiKeysWithDifferentLimitedByRoleDescriptorsForSameUser() throws ExecutionException, InterruptedException, + IOException { + // Create separate native realm user and role for user role change test + final var nativeRealmUser = randomAlphaOfLengthBetween(5, 10); + final var nativeRealmRole = randomAlphaOfLengthBetween(5, 10); + createNativeRealmUser( + nativeRealmUser, + nativeRealmRole, + new String(HASHER.hash(TEST_PASSWORD_SECURE_STRING)), + Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING)) + ); + final List firstGenerationClusterPrivileges = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); + // At a minimum include privilege to manage own API key to ensure no 403 + firstGenerationClusterPrivileges.add(randomFrom("manage_api_key", "manage_own_api_key")); + final RoleDescriptor firstGenerationRoleDescriptor = putRoleWithClusterPrivileges( + nativeRealmRole, + firstGenerationClusterPrivileges.toArray(new String[0]) + ); + final Tuple, List>> firstGenerationApiKeys = createApiKeys( + Collections.singletonMap("Authorization", basicAuthHeaderValue(nativeRealmUser, TEST_PASSWORD_SECURE_STRING)), + randomIntBetween(1, 5), + null, + "all" + ); + final List firstGenerationApiKeyIds = firstGenerationApiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList(); + expectRoleDescriptorsForApiKeys( + "limited_by_role_descriptors", + Set.of(firstGenerationRoleDescriptor), + firstGenerationApiKeyIds.stream().map(this::getApiKeyDocument).toList() + ); + // Update user's permissions and create new API keys for the user. 
The new API keys will have different limited-by role descriptors + final List secondGenerationClusterPrivileges = randomValueOtherThan(firstGenerationClusterPrivileges, () -> { + final List privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); + // At a minimum include privilege to manage own API key to ensure no 403 + privs.add(randomFrom("manage_api_key", "manage_own_api_key")); + return privs; + }); + final RoleDescriptor secondGenerationRoleDescriptor = putRoleWithClusterPrivileges( + nativeRealmRole, + secondGenerationClusterPrivileges.toArray(new String[0]) + ); + final Tuple, List>> secondGenerationApiKeys = createApiKeys( + Collections.singletonMap("Authorization", basicAuthHeaderValue(nativeRealmUser, TEST_PASSWORD_SECURE_STRING)), + randomIntBetween(1, 5), + null, + "all" + ); + final List secondGenerationApiKeyIds = secondGenerationApiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList(); + expectRoleDescriptorsForApiKeys( + "limited_by_role_descriptors", + Set.of(secondGenerationRoleDescriptor), + secondGenerationApiKeyIds.stream().map(this::getApiKeyDocument).toList() + ); + // Update user role then bulk update all API keys. This should result in new limited-by role descriptors for all API keys + final List allIds = Stream.concat(firstGenerationApiKeyIds.stream(), secondGenerationApiKeyIds.stream()).toList(); + final List finalClusterPrivileges = randomValueOtherThanMany( + p -> firstGenerationClusterPrivileges.equals(p) || secondGenerationClusterPrivileges.equals(p), + () -> { + final List privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); + // At a minimum include privilege to manage own API key to ensure no 403 + privs.add(randomFrom("manage_api_key", "manage_own_api_key")); + return privs; + } + ); + final RoleDescriptor finalRoleDescriptor = putRoleWithClusterPrivileges( + nativeRealmRole, + finalClusterPrivileges.toArray(new String[0]) + ); + + final var response = executeBulkUpdateApiKey( + nativeRealmUser, + BulkUpdateApiKeyRequest.usingApiKeyIds(allIds.toArray(String[]::new)) + ); + + assertThat(response.getErrorDetails(), anEmptyMap()); + assertThat(response.getNoops(), empty()); + assertThat(response.getUpdated(), containsInAnyOrder(allIds.toArray())); + expectRoleDescriptorsForApiKeys( + "limited_by_role_descriptors", + Set.of(finalRoleDescriptor), + allIds.stream().map(this::getApiKeyDocument).toList() + ); + } + public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { // Create separate native realm user and role for user role change test final var nativeRealmUser = randomAlphaOfLengthBetween(5, 10); @@ -2179,6 +2263,16 @@ private void expectRoleDescriptorsForApiKey( } } + private void expectRoleDescriptorsForApiKeys( + final String roleDescriptorType, + final Collection expectedRoleDescriptors, + final List> actualRawApiKeyDocs + ) throws IOException { + for (Map actualDoc : actualRawApiKeyDocs) { + expectRoleDescriptorsForApiKey(roleDescriptorType, expectedRoleDescriptors, actualDoc); + } + } + private Map getApiKeyDocument(String apiKeyId) { return client().execute(GetAction.INSTANCE, new GetRequest(SECURITY_MAIN_ALIAS, apiKeyId)).actionGet().getSource(); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/30_update.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/30_update.yml index 73ff3fba19b46..82af1fb492493 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/30_update.yml +++ 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/30_update.yml @@ -153,16 +153,16 @@ teardown: security.get_api_key: id: "$user1_key_id" owner: true - - length: { "api_keys" : 1 } + - length: { "api_keys": 1 } - match: { - "api_keys.0.metadata": { - "letter": "a", - "number": 42 - } + "api_keys.0.metadata": { + "letter": "a", + "number": 42 } + } --- -"Test update api key without request fields": +"Test update api key without explicit field updates": - do: headers: @@ -171,17 +171,6 @@ teardown: body: > { "name": "user1-api-key", - "role_descriptors": { - "role-a": { - "cluster": ["all"], - "index": [ - { - "names": ["index-a"], - "privileges": ["read"] - } - ] - } - }, "metadata": { "letter": "a", "number": 42 @@ -214,7 +203,7 @@ teardown: Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 security.update_api_key: id: "$user1_key_id" - body: {} + body: { } - match: { updated: true } # Check update works without a body @@ -240,7 +229,7 @@ teardown: } } - # Check privileges auto-updated to owner user's + # Check privileges auto-updated based on owner user's - do: headers: Authorization: ApiKey ${login_creds} @@ -326,10 +315,10 @@ teardown: owner: true - length: { "api_keys": 1 } - match: { - "api_keys.0.metadata": {} + "api_keys.0.metadata": { } } - # Check privileges auto-updated to owner user's + # Check privileges auto-updated based on owner user's - do: headers: Authorization: ApiKey ${login_creds} @@ -346,3 +335,489 @@ teardown: ] } - match: { "has_all_requested": true } + +--- +"Test bulk update api keys": + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.create_api_key: + body: > + { + "name": "api-key-1", + "role_descriptors": { + "role-a": { + "cluster": ["none"], + "index": [ + { + "names": ["index-a"], + "privileges": ["read"] + } + ] + } + } + } + - match: { name: "api-key-1" } + - is_true: id + - is_true: api_key + - set: { id: key_id_1 } + - transform_and_set: { login_creds_1: "#base64EncodeCredentials(id,api_key)" } + - match: { encoded: $login_creds_1 } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.create_api_key: + body: > + { + "name": "api-key-2", + "role_descriptors": { + "role-a": { + "cluster": ["monitor"] + } + } + } + - match: { name: "api-key-2" } + - is_true: id + - is_true: api_key + - set: { id: key_id_2 } + - transform_and_set: { login_creds_2: "#base64EncodeCredentials(id,api_key)" } + - match: { encoded: $login_creds_2 } + + # Check API keys do not have requested privileges + - do: + headers: + Authorization: ApiKey ${login_creds_1} + security.has_privileges: + user: null + body: > + { + "cluster": ["manage_own_api_key"], + "index": [ + { + "names": ["index-a"], + "privileges": ["write"] + }, + { + "names": ["index-b"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": false } + + - do: + headers: + Authorization: ApiKey ${login_creds_2} + security.has_privileges: + user: null + body: > + { + "cluster": ["manage_own_api_key"], + "index": [ + { + "names": ["index-a"], + "privileges": ["write"] + }, + { + "names": ["index-b"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": false } + + # Bulk update API keys to above privileges + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.bulk_update_api_keys: + body: > + { + "ids": ["$key_id_1", 
"$key_id_2"], + "role_descriptors": { + "role-a": { + "cluster": ["all"], + "index": [ + { + "names": ["index-a"], + "privileges": ["write"] + }, + { + "names": ["index-b"], + "privileges": ["read"] + } + ] + } + }, + "metadata": { + "letter": "a", + "number": 42 + } + } + - length: { "noops": 0 } + - length: { "updated": 2 } + - match: { + "updated.0": "$key_id_1" + } + - match: { + "updated.1": "$key_id_2" + } + + # Bulk update without request fields does not update API keys + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.bulk_update_api_keys: + body: > + { + "ids": ["$key_id_1", "$key_id_2"] + } + - is_false: errors + - length: { "updated": 0 } + - length: { "noops": 2 } + - match: { + "noops.0": "$key_id_1" + } + - match: { + "noops.1": "$key_id_2" + } + + # Check updated privileges + - do: + headers: + Authorization: ApiKey ${login_creds_1} + security.has_privileges: + user: null + body: > + { + "cluster": ["manage_own_api_key"], + "index": [ + { + "names": ["index-a"], + "privileges": ["write"] + }, + { + "names": ["index-b"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": true } + + - do: + headers: + Authorization: ApiKey ${login_creds_2} + security.has_privileges: + user: null + body: > + { + "cluster": ["manage_own_api_key"], + "index": [ + { + "names": ["index-a"], + "privileges": ["write"] + }, + { + "names": ["index-b"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": true } + + # Check that metadata was updated + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.get_api_key: + owner: true + - length: { "api_keys": 2 } + - match: { + "api_keys.0.metadata": { + "letter": "a", + "number": 42 + } + } + - match: { + "api_keys.1.metadata": { + "letter": "a", + "number": 42 + } + } +--- +"Test bulk update api key without explicit field updates": + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.create_api_key: + body: > + { + "name": "api-key-1", + "metadata": { + "letter": "a", + "number": 42 + } + } + - match: { name: "api-key-1" } + - is_true: id + - is_true: api_key + - set: { id: key_id_1 } + - transform_and_set: { login_creds_1: "#base64EncodeCredentials(id,api_key)" } + - match: { encoded: $login_creds_1 } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.create_api_key: + body: > + { + "name": "api-key-2", + "metadata": { + "letter": "b", + "number": 43 + } + } + - match: { name: "api-key-2" } + - is_true: id + - is_true: api_key + - set: { id: key_id_2 } + - transform_and_set: { login_creds_2: "#base64EncodeCredentials(id,api_key)" } + - match: { encoded: $login_creds_2 } + + # Give user new cluster privilege to test auto update + - do: + security.put_role: + name: "user_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": "index-a", + "privileges": ["all"] + } + ] + } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.bulk_update_api_keys: + body: > + { + "ids": ["$key_id_1", "$key_id_2"] + } + - is_false: errors + - length: { "noops": 0 } + - length: { "updated": 2 } + - match: { + "updated.0": "$key_id_1" + } + - match: { + "updated.1": "$key_id_2" + } + + # Check privileges auto-updated based on owner user's + - do: + headers: + Authorization: ApiKey 
${login_creds_1} + security.has_privileges: + user: null + body: > + { + "cluster": ["all"], + "index": [ + { + "names": ["index-a"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": true } + + - do: + headers: + Authorization: ApiKey ${login_creds_2} + security.has_privileges: + user: null + body: > + { + "cluster": ["all"], + "index": [ + { + "names": ["index-a"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": true } + + # Check that metadata was not updated + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.get_api_key: + id: "$key_id_1" + - length: { "api_keys": 1 } + - match: { + "api_keys.0.metadata": { + "letter": "a", + "number": 42 + } + } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.get_api_key: + id: "$key_id_2" + - length: { "api_keys": 1 } + - match: { + "api_keys.0.metadata": { + "letter": "b", + "number": 43 + } + } +--- +"Test bulk update api key with empty request fields": + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.create_api_key: + body: > + { + "name": "api-key-1", + "role_descriptors": { + "role-a": { + "cluster": ["none"], + "index": [ + { + "names": ["index-a"], + "privileges": ["none"] + } + ] + } + }, + "metadata": { + "letter": "a", + "number": 42 + } + } + - match: { name: "api-key-1" } + - is_true: id + - is_true: api_key + - set: { id: key_id_1 } + - transform_and_set: { login_creds_1: "#base64EncodeCredentials(id,api_key)" } + - match: { encoded: $login_creds_1 } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.create_api_key: + body: > + { + "name": "api-key-2", + "role_descriptors": { + "role-a": { + "cluster": ["monitor"] + } + }, + "metadata": { + "letter": "b", + "number": 43 + } + } + - match: { name: "api-key-2" } + - is_true: id + - is_true: api_key + - set: { id: key_id_2 } + - transform_and_set: { login_creds_2: "#base64EncodeCredentials(id,api_key)" } + - match: { encoded: $login_creds_2 } + + # Give user new cluster privilege to test auto update + - do: + security.put_role: + name: "user_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": "index-a", + "privileges": ["all"] + } + ] + } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 + security.bulk_update_api_keys: + body: > + { + "ids": ["$key_id_1", "$key_id_2"], + "role_descriptors": {}, + "metadata": {} + } + - is_false: errors + - length: { "noops": 0 } + - length: { "updated": 2 } + - match: { + "updated.0": "$key_id_1" + } + - match: { + "updated.1": "$key_id_2" + } + + # Check privileges auto-updated based on owner user's + - do: + headers: + Authorization: ApiKey ${login_creds_1} + security.has_privileges: + user: null + body: > + { + "cluster": ["all"], + "index": [ + { + "names": ["index-a"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": true } + + - do: + headers: + Authorization: ApiKey ${login_creds_2} + security.has_privileges: + user: null + body: > + { + "cluster": ["all"], + "index": [ + { + "names": ["index-a"], + "privileges": ["read"] + } + ] + } + - match: { "has_all_requested": true } + + # Check that metadata was updated + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyXzE6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" # api_key_user_1 
+ security.get_api_key: + owner: true + - length: { "api_keys": 2 } + - match: { + "api_keys.0.metadata": { } + } + - match: { + "api_keys.1.metadata": { } + } From f4fb03c5e2ae46e99b51cf344d70921643c4ebaa Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 3 Aug 2022 12:43:54 +0200 Subject: [PATCH 078/265] Make org.elasticsearch.cluster.routing.RoutingNode#copyShards use Array (#88788) We used this in three spots, where it copies potentially huge arrays. One of those spots doesn't need the `copyShards` call at all and can use the normal iterator as there's no concurrent modfication. The other two spots can at least just use an array, which will iterate a little faster than a mutable list and also potentially saves another round copying the array in the `ArrayList` constructor that the compiler seems to not be able to eliminate in all cases. --- .../org/elasticsearch/cluster/routing/RoutingNode.java | 4 ++-- .../elasticsearch/cluster/routing/RoutingNodes.java | 7 ++++--- .../routing/allocation/DiskThresholdMonitor.java | 10 ++++++---- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index febb71d5b0bb8..c824064d43e24 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -337,8 +337,8 @@ public String toString() { return sb.toString(); } - public List copyShards() { - return new ArrayList<>(shards.values()); + public ShardRouting[] copyShards() { + return shards.values().toArray(EMPTY_SHARD_ROUTING_ARRAY); } public boolean isEmpty() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 14df43d0845dd..88772ea8b6b8d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; @@ -1272,9 +1273,9 @@ private void ensureMutable() { public Iterator nodeInterleavedShardIterator() { final Queue> queue = new ArrayDeque<>(nodesToShards.size()); for (final var routingNode : nodesToShards.values()) { - final var iterator = routingNode.copyShards().iterator(); - if (iterator.hasNext()) { - queue.add(iterator); + final var shards = routingNode.copyShards(); + if (shards.length > 0) { + queue.add(Iterators.forArray(shards)); } } return new Iterator<>() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 2d858780ce2d2..62ec36bfc7824 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import 
org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.index.Index; import java.util.ArrayList; import java.util.Collections; @@ -384,9 +383,12 @@ public void onNewInfo(ClusterInfo info) { .collect(Collectors.toSet()); // Generate a set of all the indices that exist on either the target or source of a node replacement - final Set indicesOnReplaceSourceOrTarget = nodesIdsPartOfReplacement.stream() - .flatMap(nodeId -> state.getRoutingNodes().node(nodeId).copyShards().stream().map(ShardRouting::index).map(Index::getName)) - .collect(Collectors.toSet()); + final Set indicesOnReplaceSourceOrTarget = new HashSet<>(); + for (String nodeId : nodesIdsPartOfReplacement) { + for (ShardRouting shardRouting : state.getRoutingNodes().node(nodeId)) { + indicesOnReplaceSourceOrTarget.add(shardRouting.index().getName()); + } + } final Set indicesToAutoRelease = state.routingTable() .indicesRouting() From 0d38d10118bd292b0b7693f67b45b3edcf925cd3 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 3 Aug 2022 13:54:41 +0300 Subject: [PATCH 079/265] [ML] Mute model deployment rolling upgrade tests for backport (#89069) ... of #88855 --- .../org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java index 682875ae5a2e5..b1ec3bdc2a129 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -29,6 +30,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/89068") public class MLModelDeploymentsUpgradeIT extends AbstractUpgradeTestCase { // See PyTorchModelIT for how this model was created From 6bf5078fa99ca2806c8fd75fc65a351440172341 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Denilson=20das=20Merc=C3=AAs=20Amorim?= Date: Wed, 3 Aug 2022 08:07:17 -0300 Subject: [PATCH 080/265] Improve efficiency of BoundedBreakIteratorScanner fragmentation algorithm (#89041) As discussed in #73569 the current implementation is too slow in certain scenarios. The inefficient part of the code can be stated as the following problem: Given a text (getText()) and a position in this text (offset), find the sentence boundary before and after the offset, in such a way that the after boundary is maximal but respects end boundary - start boundary < fragment size. In case it's impossible to produce an after boundary that respects the said condition, use the nearest boundary following offset. The current approach begins by finding the nearest preceding and following boundaries, and expands the following boundary greedily while it respects the problem restriction. This is fine asymptotically, but BreakIterator which is used to find each boundary is sometimes expensive. This new approach maximizes the after boundary by scanning for the last boundary preceding the position that would cause the condition to be violated (i.e. 
knowing start boundary and offset, how many characters are left before resulting length is fragment size). If this scan finds the start boundary, it means it's impossible to satisfy the problem restriction, and we get the first boundary following offset instead (or better, since we already scanned [offset, targetEndOffset], start from targetEndOffset + 1). --- docs/changelog/89041.yaml | 8 +++++ .../BoundedBreakIteratorScanner.java | 31 +++++++++++++------ .../BoundedBreakIteratorScannerTests.java | 28 +++++++++++++++++ 3 files changed, 58 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/89041.yaml diff --git a/docs/changelog/89041.yaml b/docs/changelog/89041.yaml new file mode 100644 index 0000000000000..2ec38d3848fda --- /dev/null +++ b/docs/changelog/89041.yaml @@ -0,0 +1,8 @@ +pr: 89041 +summary: Improve efficiency of `BoundedBreakIteratorScanner` fragmentation algorithm +area: Highlighting +type: enhancement +issues: + - 73569 + - 73785 + diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScanner.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScanner.java index 0864e82346d57..8394628746392 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScanner.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScanner.java @@ -89,16 +89,29 @@ public int preceding(int offset) { innerStart = innerEnd; innerEnd = windowEnd; } else { - windowStart = innerStart = mainBreak.preceding(offset); - windowEnd = innerEnd = mainBreak.following(offset - 1); - // expand to next break until we reach maxLen - while (innerEnd - innerStart < maxLen) { - int newEnd = mainBreak.following(innerEnd); - if (newEnd == DONE || (newEnd - innerStart) > maxLen) { - break; - } - windowEnd = innerEnd = newEnd; + innerStart = Math.max(mainBreak.preceding(offset), 0); + + final long targetEndOffset = (long) offset + Math.max(0, maxLen - (offset - innerStart)); + final int textEndIndex = getText().getEndIndex(); + + if (targetEndOffset + 1 > textEndIndex) { + innerEnd = textEndIndex; + } else { + innerEnd = mainBreak.preceding((int) targetEndOffset + 1); + } + + assert innerEnd != DONE && innerEnd >= innerStart; + + // in case no break was found up to maxLen, find one afterwards. + if (innerStart == innerEnd) { + innerEnd = mainBreak.following((int) targetEndOffset); + assert innerEnd - innerStart > maxLen; + } else { + assert innerEnd - innerStart <= maxLen; } + + windowStart = innerStart; + windowEnd = innerEnd; } if (innerEnd - innerStart > maxLen) { diff --git a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScannerTests.java b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScannerTests.java index 4146ede5311c5..db0e5e1e4c43f 100644 --- a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScannerTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScannerTests.java @@ -117,4 +117,32 @@ public void testBoundedSentence() { testRandomAsciiTextCase(BoundedBreakIteratorScanner.getSentence(Locale.ROOT, maxLen), maxLen); } } + + public void testTextThatEndsBeforeMaxLen() { + BreakIterator bi = BoundedBreakIteratorScanner.getSentence(Locale.ROOT, 1000); + + final String text = "This is the first test sentence. 
Here is the second one."; + + int offset = text.indexOf("first"); + bi.setText(text); + assertEquals(0, bi.preceding(offset)); + assertEquals(text.length(), bi.following(offset - 1)); + + offset = text.indexOf("second"); + bi.setText(text); + assertEquals(33, bi.preceding(offset)); + assertEquals(text.length(), bi.following(offset - 1)); + } + + public void testFragmentSizeThatIsTooBig() { + final int fragmentSize = Integer.MAX_VALUE; + BreakIterator bi = BoundedBreakIteratorScanner.getSentence(Locale.ROOT, fragmentSize); + + final String text = "Any sentence"; + final int offset = 0; // find at beggining of text + + bi.setText(text); + assertEquals(0, bi.preceding(offset)); + assertEquals(text.length(), bi.following(offset - 1)); + } } From 3708ca6b46cf890b7ad66bceff2c596bef62bfd9 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 3 Aug 2022 14:37:24 +0300 Subject: [PATCH 081/265] [ML] Adjust assignment serialization versions after backport (#89071) ... of #88855 --- .../core/ml/inference/assignment/TrainedModelAssignment.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index f559e39546626..91258c00f7c71 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -142,7 +142,7 @@ public TrainedModelAssignment(StreamInput in) throws IOException { this.assignmentState = in.readEnum(AssignmentState.class); this.reason = in.readOptionalString(); this.startTime = in.readInstant(); - if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + if (in.getVersion().onOrAfter(Version.V_8_4_0)) { this.maxAssignedAllocations = in.readVInt(); } else { this.maxAssignedAllocations = totalCurrentAllocations(); @@ -273,7 +273,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(assignmentState); out.writeOptionalString(reason); out.writeInstant(startTime); - if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + if (out.getVersion().onOrAfter(Version.V_8_4_0)) { out.writeVInt(maxAssignedAllocations); } } From 30142ea1e0237b00dbe2324bbd52a0f14a67bf28 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 3 Aug 2022 12:40:06 +0100 Subject: [PATCH 082/265] [ML] Mute tests for inference result format change (#89075) These tests will fail if elastic/ml-cpp#2376 with them unmuted. #88901 will follow up with the Java side changes. 
--- .../org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java | 1 + .../resources/rest-api-spec/test/ml/3rd_party_deployment.yml | 2 ++ .../xpack/restart/MLModelDeploymentFullClusterRestartIT.java | 1 + .../elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java | 3 +-- 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 40eb8a77913b0..9498b58bb5b22 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -75,6 +75,7 @@ * torch.jit.save(traced_model, "simplemodel.pt") * ## End Python */ +@ESRestTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/ml-cpp/pull/2376") public class PyTorchModelIT extends ESRestTestCase { private static final String BASIC_AUTH_VALUE_SUPER_USER = UsernamePasswordToken.basicAuthHeaderValue( diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index 6d0348b1fba92..bc4a36cef9ddd 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -76,6 +76,8 @@ setup: --- "Test start and stop deployment with cache": - skip: + version: all + reason: "@AwaitsFix https://github.com/elastic/ml-cpp/pull/2376" features: allowed_warnings - do: diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index f1c7c04905bea..b0e624b470d0b 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -31,6 +31,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +@AbstractFullClusterRestartTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/ml-cpp/pull/2376") public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java index b1ec3bdc2a129..8109ce0f7d0f3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -30,7 +29,7 @@ import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.hasSize; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/89068") +@AbstractUpgradeTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/ml-cpp/pull/2376") public class MLModelDeploymentsUpgradeIT extends AbstractUpgradeTestCase { // See PyTorchModelIT for how this model was created From 512bfebc10a2544450b4775bceff71909d96df82 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Wed, 3 Aug 2022 14:13:31 +0100 Subject: [PATCH 083/265] Provide tracing implementation using OpenTelemetry + APM agent (#88443) Part of #84369. Implement the `Tracer` interface by providing a module that uses OpenTelemetry, along with Elastic's APM agent for Java. See the file `TRACING.md` for background on the changes and the reasoning for some of the implementation decisions. The configuration mechanism is the most fiddly part of this PR. The Security Manager permissions required by the APM Java agent make it prohibitive to start an agent from within Elasticsearch programmatically, so it must be configured when the ES JVM starts. That means that the startup CLI needs to assemble the required JVM options. To complicate matters further, the APM agent needs a secret token in order to ship traces to the APM server. We can't use Java system properties to configure this, since otherwise the secret will be readable to all code in Elasticsearch. It therefore has to be configured in a dedicated config file. This in itself is awkward, since we don't want to leave secrets in config files. Therefore, we pull the APM secret token from the keystore, write it to a config file, then delete the config file after ES starts. There's a further issue with the config file. Any options we set in the APM agent config file cannot later be reconfigured via system properties, so we need to make sure that only "static" configuration goes into the config file. I generated most of the files under `qa/apm` using an APM test utility (I can't remember which one now, unfortunately). The goal is to set up a complete system so that traces can be captured in the APM server, and the results in Elasticsearch inspected.
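To make the startup flow described above concrete, here is a minimal, illustrative sketch of the keystore-to-temp-file mechanism. It follows the same shape as `APMJvmOptions.java` later in this patch (a temporary properties file and a `-javaagent:...=c=...` argument), but the class and method names here are invented for illustration, and the real option parsing, validation and keystore handling are omitted.

```java
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Properties;

// Hypothetical helper, not part of the patch: shows how static agent settings
// (including secrets pulled from the keystore) can be written to a temporary
// config file that is handed to the agent and deleted after startup.
class ApmOptionsSketch {
    static List<String> buildJvmOptions(Path agentJar, Path tmpDir, Map<String, String> staticSettings) throws Exception {
        Properties properties = new Properties();
        properties.putAll(staticSettings); // e.g. secret_token / api_key read from the ES keystore

        // Write the static (non-reloadable) configuration, including secrets, to a temp file...
        Path configFile = Files.createTempFile(tmpDir, ".elstcapm.", ".tmp");
        try (OutputStream os = Files.newOutputStream(configFile)) {
            properties.store(os, "Generated by the startup CLI, deleted after startup");
        }

        // ...and point the agent at it via the agent argument, not a system property,
        // so the secret never appears in the JVM's readable system properties.
        return List.of("-javaagent:" + agentJar + "=c=" + configFile);
    }
}
```

Passing the config file as part of the `-javaagent` argument (rather than e.g. `-Delastic.apm.config_file=...`) means the agent reads it once at attach time and will not try to reload it, so the startup code can safely delete the file as soon as Elasticsearch is up.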
--- TRACING.md | 156 ++++++ .../server/cli/APMJvmOptions.java | 290 +++++++++++ .../server/cli/JvmOptionsParser.java | 34 +- .../elasticsearch/server/cli/ServerCli.java | 38 +- .../server/cli/ServerProcess.java | 25 +- .../server/cli/ServerCliTests.java | 2 +- .../server/cli/ServerProcessTests.java | 18 +- .../windows/service/WindowsServiceDaemon.java | 2 +- docs/changelog/88443.yaml | 6 + modules/apm/build.gradle | 24 + .../elastic-apm-agent-1.33.0.jar.sha1 | 1 + .../licenses/elastic-apm-agent-LICENSE.txt | 201 ++++++++ .../apm/licenses/elastic-apm-agent-NOTICE.txt | 465 ++++++++++++++++++ .../apm/licenses/opentelemetry-LICENSE.txt | 201 ++++++++ modules/apm/licenses/opentelemetry-NOTICE.txt | 0 .../opentelemetry-api-1.15.0.jar.sha1 | 1 + .../opentelemetry-context-1.15.0.jar.sha1 | 1 + ...pentelemetry-semconv-1.15.0-alpha.jar.sha1 | 1 + .../org/elasticsearch/tracing/apm/APM.java | 107 ++++ .../tracing/apm/APMAgentSettings.java | 156 ++++++ .../elasticsearch/tracing/apm/APMTracer.java | 394 +++++++++++++++ .../plugin-metadata/plugin-security.policy | 22 + .../tracing/apm/APMAgentSettingsTests.java | 89 ++++ .../tracing/apm/APMTracerTests.java | 174 +++++++ qa/apm/build.gradle | 49 ++ qa/apm/config/elasticsearch/roles.yml | 34 ++ qa/apm/config/elasticsearch/service_tokens | 2 + qa/apm/config/elasticsearch/users | 9 + qa/apm/config/elasticsearch/users_roles | 13 + qa/apm/config/kibana/kibana-8.yml | 78 +++ qa/apm/docker-compose.yml | 154 ++++++ qa/apm/scripts/tls/apm-server/cert.crt | 27 + qa/apm/scripts/tls/apm-server/key.pem | 52 ++ .../org/elasticsearch/tracing/apm/ApmIT.java | 209 ++++++++ .../elasticsearch/action/ActionModule.java | 1 + .../elasticsearch/bootstrap/PolicyUtil.java | 3 +- .../java/org/elasticsearch/node/Node.java | 45 +- .../elasticsearch/plugins/TracerPlugin.java | 3 +- .../elasticsearch/bootstrap/security.policy | 4 + .../rest/RestControllerTests.java | 49 +- 40 files changed, 3046 insertions(+), 94 deletions(-) create mode 100644 TRACING.md create mode 100644 distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java create mode 100644 docs/changelog/88443.yaml create mode 100644 modules/apm/build.gradle create mode 100644 modules/apm/licenses/elastic-apm-agent-1.33.0.jar.sha1 create mode 100644 modules/apm/licenses/elastic-apm-agent-LICENSE.txt create mode 100644 modules/apm/licenses/elastic-apm-agent-NOTICE.txt create mode 100644 modules/apm/licenses/opentelemetry-LICENSE.txt create mode 100644 modules/apm/licenses/opentelemetry-NOTICE.txt create mode 100644 modules/apm/licenses/opentelemetry-api-1.15.0.jar.sha1 create mode 100644 modules/apm/licenses/opentelemetry-context-1.15.0.jar.sha1 create mode 100644 modules/apm/licenses/opentelemetry-semconv-1.15.0-alpha.jar.sha1 create mode 100644 modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java create mode 100644 modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java create mode 100644 modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java create mode 100644 modules/apm/src/main/plugin-metadata/plugin-security.policy create mode 100644 modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMAgentSettingsTests.java create mode 100644 modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java create mode 100644 qa/apm/build.gradle create mode 100644 qa/apm/config/elasticsearch/roles.yml create mode 100644 qa/apm/config/elasticsearch/service_tokens create mode 100644 qa/apm/config/elasticsearch/users create mode 100644 
qa/apm/config/elasticsearch/users_roles create mode 100644 qa/apm/config/kibana/kibana-8.yml create mode 100644 qa/apm/docker-compose.yml create mode 100644 qa/apm/scripts/tls/apm-server/cert.crt create mode 100644 qa/apm/scripts/tls/apm-server/key.pem create mode 100644 qa/apm/src/test/java/org/elasticsearch/tracing/apm/ApmIT.java diff --git a/TRACING.md b/TRACING.md new file mode 100644 index 0000000000000..b998850d43dc2 --- /dev/null +++ b/TRACING.md @@ -0,0 +1,156 @@ +# Tracing in Elasticsearch + +Elasticsearch is instrumented using the [OpenTelemetry][otel] API, which allows +us to gather traces and analyze what Elasticsearch is doing. + + +## How is tracing implemented? + +The Elasticsearch server code contains a [`tracing`][tracing] package, which is +an abstraction over the OpenTelemetry API. All locations in the code that +perform instrumentation and tracing must use these abstractions. + +Separately, there is the [`apm`](./modules/apm/) module, which works with the +OpenTelemetry API directly to record trace data. Underneath the OTel API, we +use Elastic's [APM agent for Java][agent], which attaches at runtime to the +Elasticsearch JVM and removes the need for Elasticsearch to hard-code the use of +an OTel implementation. Note that while it is possible to programmatically start +the APM agent, the Security Manager permissions required make this essentially +impossible. + + +## How is tracing configured? + +You must supply configuration and credentials for the APM server (see below). +You must also set `tracing.apm.enabled` to `true`, but this can be toggled at +runtime. + +All APM settings live under `tracing.apm`. All settings related to the Java agent +go under `tracing.apm.agent`. Anything you set under there will be propagated to +the agent. + +For agent settings that can be changed dynamically, you can use the cluster +settings REST API. For example, to change the sampling rate: + + curl -XPUT \ + -H "Content-type: application/json" \ + -u "$USERNAME:$PASSWORD" \ + -d '{ "persistent": { "tracing.apm.agent.transaction_sample_rate": "0.75" } }' \ + https://localhost:9200/_cluster/settings + + +### More details about configuration + +For context, the APM agent pulls configuration from [multiple +sources][agent-config], with a hierarchy that means, for example, that options +set in the config file cannot be overridden via system properties. + +Now, in order to send tracing data to the APM server, ES needs to configured with +either a `secret_key` or an `api_key`. We could configure these in the agent via +system properties, but then their values would be available to any Java code in +Elasticsearch that can read system properties. + +Instead, when Elasticsearch bootstraps itself, it compiles all APM settings +together, including any `secret_key` or `api_key` values from the ES keystore, +and writes out a temporary APM config file containing all static configuration +(i.e. values that cannot change after the agent starts). This file is deleted +as soon as possible after ES starts up. Settings that are not sensitive and can +be changed dynamically are configured via system properties. Calls to the ES +settings REST API are translated into system property writes, which the agent +later picks up and applies. + +## Where is tracing data sent? + +You need to have an APM server running somewhere. For example, you can create a +deployment in [Elastic Cloud](https://www.elastic.co/cloud/) with Elastic's APM +integration. + +## What do we trace? + +We primarily trace "tasks". 
The tasks framework in Elasticsearch allows work to +be scheduled for execution, cancelled, executed in a different thread pool, and +so on. Tracing a task results in a "span", which represents the execution of the +task in the tracing system. We also instrument REST requests, which are not (at +present) modelled by tasks. + +A span can be associated with a parent span, which allows all spans in, for +example, a REST request to be grouped together. Spans can track work across +different Elasticsearch nodes. + +Elasticsearch also supports distributed tracing via [W3C Trace Context][w3c] +headers. If clients of Elasticsearch send these headers with their requests, +then that data will be forwarded to the APM server in order to yield a trace +across systems. + +In rare circumstances, it is possible to avoid tracing a task using +`TaskManager#register(String,String,TaskAwareRequest,boolean)`. For example, +Machine Learning uses tasks to record which models are loaded on each node. Such +tasks are long-lived and are not suitable candidates for APM tracing. + +## Thread contexts and nested spans + +When a span is started, Elasticsearch tracks information about that span in the +current [thread context][thread-context]. If a new thread context is created, +then the current span information must not be propagated but instead renamed, so +that (1) it doesn't interfere when new trace information is set in the context, +and (2) the previous trace information is available to establish a parent / +child span relationship. This is done with `ThreadContext#newTraceContext()`. + +Sometimes we need to detach new spans from their parent. For example, creating +an index starts some related background tasks, but these shouldn't be associated +with the REST request, otherwise all the background task spans will be +associated with the REST request for as long as Elasticsearch is running. +`ThreadContext` provides the `clearTraceContext()` method for this purpose. + +## How do I trace something that isn't a task? + +First work out if you can turn it into a task. No, really. + +If you can't do that, you'll need to ensure that your class can get access to a +`Tracer` instance (this is available to inject, or you'll need to pass it when +your class is created). Then you need to call the appropriate methods on the +tracer when a span should start and end. You'll also need to manage the creation +of new trace contexts when child spans need to be created. A rough sketch of this pattern is shown further below. + +## What additional attributes should I set? + +That's up to you. Be careful not to capture anything that could leak sensitive +or personal information. + +## What is "scope" and when should I use it? + +Usually you won't need to. + +That said, sometimes you may want more details to be captured about a particular +section of code. You can think of "scope" as representing the currently active +tracing context. Using scope allows the APM agent to do the following: + +* Enables automatic correlation between the "active span" and logging, where + logs have also been captured. +* Enables capturing any exceptions thrown when the span is active, and linking + those exceptions to the span. +* Allows the sampling profiler to be used as it allows samples to be linked to + the active span (if any), so the agent can automatically get extra spans + without manual instrumentation. + +However, a scope must be closed in the same thread in which it was opened, which +cannot be guaranteed when using tasks, making scope largely useless to +Elasticsearch.
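Returning to the question above of tracing something that isn't a task, here is the rough sketch promised there. The `Tracer` method names used (`startSpan`/`stopSpan`) are illustrative stand-ins rather than the real interface in the server's `tracing` package, and `ShardCleanupService` is an invented example class; only the shape of the pattern is the point.

```java
import java.util.Map;

interface Tracer { // stand-in for the server's Tracer abstraction, not the real signatures
    void startSpan(String spanId, String name, Map<String, Object> attributes);
    void stopSpan(String spanId);
}

class ShardCleanupService { // invented, non-task component
    private final Tracer tracer;

    ShardCleanupService(Tracer tracer) { // the Tracer is injected or passed in at construction time
        this.tracer = tracer;
    }

    void cleanUp(String shardId) {
        String spanId = "cleanup-" + shardId;
        tracer.startSpan(spanId, "shard-cleanup", Map.<String, Object>of("es.shard.id", shardId));
        try {
            // ... do the actual work; anything that spawns child work would first call
            // ThreadContext#newTraceContext() so that its spans become children of this one
        } finally {
            tracer.stopSpan(spanId); // always end the span, even if the work throws
        }
    }
}
```

The tracer arrives through the constructor, the span is always ended in a `finally` block, and new trace contexts are created before any child work so that the parent/child relationship is preserved.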
+ +In the OpenTelemetry documentation, spans, scope and context are fairly +straightforward to use, since `Scope` is an `AutoCloseable` and so can be +easily created and cleaned up use try-with-resources blocks. Unfortunately, +Elasticsearch is a complex piece of software, and also extremely asynchronous, +so the typical OpenTelemetry examples do not work. + +Nonetheless, it is possible to manually use scope where we need more detail by +explicitly opening a scope via the `Tracer`. + + +[otel]: https://opentelemetry.io/ +[thread-context]: ./server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java). +[w3c]: https://www.w3.org/TR/trace-context/ +[tracing]: ./server/src/main/java/org/elasticsearch/tracing/ +[config]: ./modules/apm/src/main/config/elasticapm.properties +[agent-config]: https://www.elastic.co/guide/en/apm/agent/java/master/configuration.html +[agent]: https://www.elastic.co/guide/en/apm/agent/java/current/index.html diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java new file mode 100644 index 0000000000000..d9ad620ffaee5 --- /dev/null +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.server.cli; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +/** + * This class is responsible for working out if APM tracing is configured and if so, preparing + * a temporary config file for the APM Java agent and CLI options to the JVM to configure APM. + * APM doesn't need to be enabled, as that can be toggled at runtime, but some configuration e.g. + * server URL and secret key can only be provided when Elasticsearch starts. + */ +class APMJvmOptions { + /** + * Contains agent configuration that must always be applied, and cannot be overridden. + */ + // tag::noformat + private static final Map STATIC_CONFIG = Map.of( + // Identifies the version of Elasticsearch in the captured trace data. + "service_version", Version.CURRENT.toString(), + + // Configures a log file to write to. `_AGENT_HOME_` is a placeholder used + // by the agent. Don't disable writing to a log file, as the agent will then + // require extra Security Manager permissions when it tries to do something + // else, and it's just painful. + "log_file", "_AGENT_HOME_/../../logs/apm.log", + + // ES does not use auto-instrumentation. 
+ "instrument", "false" + ); + + /** + * Contains default configuration that will be used unless overridden by explicit configuration. + */ + private static final Map CONFIG_DEFAULTS = Map.of( + // This is used to keep all the errors and transactions of a service + // together and is the primary filter in the Elastic APM user interface. + // + // You can optionally also set `service_node_name`, which is used to + // distinguish between different nodes of a service, therefore it should + // be unique for each JVM within a service. If not set, data + // aggregations will be done based on a container ID (where valid) or on + // the reported hostname (automatically discovered or manually + // configured through hostname). However, if this node's `node.name` is + // set, then that value is used for the `service_node_name`. + "service_name", "elasticsearch", + + // An arbitrary string that identifies this deployment environment. For + // example, "dev", "staging" or "prod". Can be anything you like, but must + // have the same value across different systems in the same deployment + // environment. + "environment", "dev", + + // Logging configuration. Unless you need detailed logs about what the APM + // is doing, leave this value alone. + "log_level", "error", + "application_packages", "org.elasticsearch,org.apache.lucene", + "metrics_interval", "120s", + "breakdown_metrics", "false", + "central_config", "false" + ); + // end::noformat + + /** + * Lists all APM configuration keys that are not dynamic and must be configured via the config file. + */ + private static final List STATIC_AGENT_KEYS = List.of( + "api_key", + "aws_lambda_handler", + "breakdown_metrics", + "classes_excluded_from_instrumentation", + "cloud_provider", + "data_flush_timeout", + "disable_metrics", + "disable_send", + "enabled", + "enable_public_api_annotation_inheritance", + "environment", + "global_labels", + "hostname", + "include_process_args", + "log_ecs_formatter_allow_list", + "log_ecs_reformatting_additional_fields", + "log_ecs_reformatting_dir", + "log_file", + "log_file_size", + "log_format_file", + "log_format_sout", + "max_queue_size", + "metrics_interval", + "plugins_dir", + "profiling_inferred_spans_lib_directory", + "secret_token", + "service_name", + "service_node_name", + "service_version", + "stress_monitoring_interval", + "trace_methods_duration_threshold", + "use_jaxrs_path_as_transaction_name", + "verify_server_cert" + ); + + /** + * This method works out if APM tracing is enabled, and if so, prepares a temporary config file + * for the APM Java agent and CLI options to the JVM to configure APM. The config file is temporary + * because it will be deleted once Elasticsearch starts. 
+ * + * @param settings the Elasticsearch settings to consider + * @param keystore a wrapper to access the keystore, or null if there is no keystore + * @param tmpdir Elasticsearch's temporary directory, where the config file will be written + */ + static List apmJvmOptions(Settings settings, @Nullable KeyStoreWrapper keystore, Path tmpdir) throws UserException, + IOException { + final Path agentJar = findAgentJar(); + + if (agentJar == null) { + return List.of(); + } + + final Map propertiesMap = extractApmSettings(settings); + + // No point doing anything if we don't have a destination for the trace data, and it can't be configured dynamically + if (propertiesMap.containsKey("server_url") == false && propertiesMap.containsKey("server_urls") == false) { + return List.of(); + } + + if (propertiesMap.containsKey("service_node_name") == false) { + final String nodeName = settings.get("node.name"); + if (nodeName != null) { + propertiesMap.put("service_node_name", nodeName); + } + } + + if (keystore != null) { + extractSecureSettings(keystore, propertiesMap); + } + final Map dynamicSettings = extractDynamicSettings(propertiesMap); + + final Path tmpProperties = writeApmProperties(tmpdir, propertiesMap); + + final List options = new ArrayList<>(); + // Use an agent argument to specify the config file instead of e.g. `-Delastic.apm.config_file=...` + // because then the agent won't try to reload the file, and we can remove it after startup. + options.add("-javaagent:" + agentJar + "=c=" + tmpProperties); + + dynamicSettings.forEach((key, value) -> options.add("-Delastic.apm." + key + "=" + value)); + + return options; + } + + private static void extractSecureSettings(KeyStoreWrapper keystore, Map propertiesMap) { + final Set settingNames = keystore.getSettingNames(); + for (String key : List.of("api_key", "secret_token")) { + if (settingNames.contains("tracing.apm." + key)) { + try (SecureString token = keystore.getString("tracing.apm." + key)) { + propertiesMap.put(key, token.toString()); + } + } + } + } + + /** + * Removes settings that can be changed dynamically at runtime from the supplied map, and returns + * those settings in a new map. + */ + private static Map extractDynamicSettings(Map propertiesMap) { + final Map cliOptionsMap = new HashMap<>(); + + final Iterator> propertiesIterator = propertiesMap.entrySet().iterator(); + while (propertiesIterator.hasNext()) { + final Map.Entry entry = propertiesIterator.next(); + if (STATIC_AGENT_KEYS.contains(entry.getKey()) == false) { + propertiesIterator.remove(); + cliOptionsMap.put(entry.getKey(), entry.getValue()); + } + } + + return cliOptionsMap; + } + + private static Map extractApmSettings(Settings settings) throws UserException { + final Map propertiesMap = new HashMap<>(); + + final Settings agentSettings = settings.getByPrefix("tracing.apm.agent."); + agentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(agentSettings.get(key)))); + + // These settings must not be changed + for (String key : STATIC_CONFIG.keySet()) { + if (propertiesMap.containsKey(key)) { + throw new UserException( + ExitCodes.CONFIG, + "Do not set a value for [tracing.apm.agent." + key + "], as this is configured automatically by Elasticsearch" + ); + } + } + + CONFIG_DEFAULTS.forEach(propertiesMap::putIfAbsent); + + propertiesMap.putAll(STATIC_CONFIG); + return propertiesMap; + } + + /** + * Writes a Java properties file with data from supplied map to a temporary config, and returns + * the file that was created. 
+ * + * @param tmpdir the directory for the file + * @param propertiesMap the data to write + * @return the file that was created + * @throws IOException if writing the file fails + */ + private static Path writeApmProperties(Path tmpdir, Map propertiesMap) throws IOException { + final Properties p = new Properties(); + p.putAll(propertiesMap); + + final Path tmpFile = Files.createTempFile(tmpdir, ".elstcapm.", ".tmp"); + try (OutputStream os = Files.newOutputStream(tmpFile)) { + p.store(os, " Automatically generated by Elasticsearch, do not edit!"); + } + return tmpFile; + } + + /** + * The JVM argument that configure the APM agent needs to specify the agent jar path, so this method + * finds the jar by inspecting the filesystem. + * @return the agent jar file + * @throws IOException if a problem occurs reading the filesystem + */ + @Nullable + private static Path findAgentJar() throws IOException, UserException { + final Path apmModule = Path.of(System.getProperty("user.dir")).resolve("modules/apm"); + + if (Files.notExists(apmModule)) { + if (Build.CURRENT.isProductionRelease()) { + throw new UserException( + ExitCodes.CODE_ERROR, + "Expected to find [apm] module in [" + apmModule + "]! Installation is corrupt" + ); + } + return null; + } + + try (var apmStream = Files.list(apmModule)) { + final List paths = apmStream.filter( + path -> path.getFileName().toString().matches("elastic-apm-agent-\\d+\\.\\d+\\.\\d+\\.jar") + ).toList(); + + if (paths.size() > 1) { + throw new UserException( + ExitCodes.CODE_ERROR, + "Found multiple [elastic-apm-agent] jars under [" + apmModule + "]! Installation is corrupt." + ); + } + + if (paths.isEmpty()) { + throw new UserException( + ExitCodes.CODE_ERROR, + "Found no [elastic-apm-agent] jar under [" + apmModule + "]! Installation is corrupt." + ); + } + + return paths.get(0); + } + } +} diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java index 455e9dc607194..b20aad3a0b845 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java @@ -8,8 +8,10 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.bootstrap.ServerArgs; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.settings.KeyStoreWrapper; import java.io.BufferedReader; import java.io.IOException; @@ -68,16 +70,17 @@ SortedMap invalidLines() { * files in the {@code jvm.options.d} directory, and the options given by the {@code ES_JAVA_OPTS} environment * variable. 
* - * @param configDir the ES config dir - * @param tmpDir the directory that should be passed to {@code -Djava.io.tmpdir} - * @param envOptions the options passed through the ES_JAVA_OPTS env var + * @param keystore the installation's keystore + * @param configDir the ES config dir + * @param tmpDir the directory that should be passed to {@code -Djava.io.tmpdir} + * @param envOptions the options passed through the ES_JAVA_OPTS env var * @return the list of options to put on the Java command line * @throws InterruptedException if the java subprocess is interrupted - * @throws IOException if there is a problem reading any of the files - * @throws UserException if there is a problem parsing the `jvm.options` file or `jvm.options.d` files + * @throws IOException if there is a problem reading any of the files + * @throws UserException if there is a problem parsing the `jvm.options` file or `jvm.options.d` files */ - static List determineJvmOptions(Path configDir, Path tmpDir, String envOptions) throws InterruptedException, IOException, - UserException { + static List determineJvmOptions(ServerArgs args, KeyStoreWrapper keystore, Path configDir, Path tmpDir, String envOptions) + throws InterruptedException, IOException, UserException { final JvmOptionsParser parser = new JvmOptionsParser(); @@ -86,7 +89,7 @@ static List determineJvmOptions(Path configDir, Path tmpDir, String envO substitutions.put("ES_PATH_CONF", configDir.toString()); try { - return parser.jvmOptions(configDir, envOptions, substitutions); + return parser.jvmOptions(args, keystore, configDir, tmpDir, envOptions, substitutions); } catch (final JvmOptionsFileParserException e) { final String errorMessage = String.format( Locale.ROOT, @@ -115,8 +118,14 @@ static List determineJvmOptions(Path configDir, Path tmpDir, String envO } } - private List jvmOptions(final Path config, final String esJavaOpts, final Map substitutions) - throws InterruptedException, IOException, JvmOptionsFileParserException { + private List jvmOptions( + ServerArgs args, + KeyStoreWrapper keystore, + final Path config, + Path tmpDir, + final String esJavaOpts, + final Map substitutions + ) throws InterruptedException, IOException, JvmOptionsFileParserException, UserException { final List jvmOptions = readJvmOptionsFiles(config); @@ -132,12 +141,15 @@ private List jvmOptions(final Path config, final String esJavaOpts, fina final List ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions); final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(); + final List apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), keystore, tmpDir); + final List finalJvmOptions = new ArrayList<>( - systemJvmOptions.size() + substitutedJvmOptions.size() + ergonomicJvmOptions.size() + systemJvmOptions.size() + substitutedJvmOptions.size() + ergonomicJvmOptions.size() + apmOptions.size() ); finalJvmOptions.addAll(systemJvmOptions); // add the system JVM options first so that they can be overridden finalJvmOptions.addAll(substitutedJvmOptions); finalJvmOptions.addAll(ergonomicJvmOptions); + finalJvmOptions.addAll(apmOptions); return finalJvmOptions; } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java index c0a259d9f4699..73269b8c719fd 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java +++ 
b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java @@ -27,7 +27,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.monitor.jvm.JvmInfo; -import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; @@ -76,15 +75,21 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce validateConfig(options, env); - // setup security - final SecureString keystorePassword = getKeystorePassword(env.configFile(), terminal); - env = autoConfigureSecurity(terminal, options, processInfo, env, keystorePassword); + try (KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile())) { + // setup security + final SecureString keystorePassword = getKeystorePassword(keystore, terminal); + env = autoConfigureSecurity(terminal, options, processInfo, env, keystorePassword); - // install/remove plugins from elasticsearch-plugins.yml - syncPlugins(terminal, env, processInfo); + if (keystore != null) { + keystore.decrypt(keystorePassword.getChars()); + } + + // install/remove plugins from elasticsearch-plugins.yml + syncPlugins(terminal, env, processInfo); - ServerArgs args = createArgs(options, env, keystorePassword, processInfo); - this.server = startServer(terminal, processInfo, args); + ServerArgs args = createArgs(options, env, keystorePassword, processInfo); + this.server = startServer(terminal, processInfo, args, keystore); + } if (options.has(daemonizeOption)) { server.detach(); @@ -122,13 +127,11 @@ private void validateConfig(OptionSet options, Environment env) throws UserExcep } } - private static SecureString getKeystorePassword(Path configDir, Terminal terminal) throws IOException { - try (KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir)) { - if (keystore != null && keystore.hasPassword()) { - return new SecureString(terminal.readSecret(KeyStoreWrapper.PROMPT)); - } else { - return new SecureString(new char[0]); - } + private static SecureString getKeystorePassword(KeyStoreWrapper keystore, Terminal terminal) { + if (keystore != null && keystore.hasPassword()) { + return new SecureString(terminal.readSecret(KeyStoreWrapper.PROMPT)); + } else { + return new SecureString(new char[0]); } } @@ -226,7 +229,8 @@ protected Command loadTool(String toolname, String libs) { } // protected to allow tests to override - protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws UserException { - return ServerProcess.start(terminal, processInfo, args); + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args, KeyStoreWrapper keystore) + throws UserException { + return ServerProcess.start(terminal, processInfo, args, keystore); } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java index 59152f0550f89..674f9f12c916b 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java @@ -15,6 +15,7 @@ import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; import 
org.elasticsearch.core.SuppressForbidden; @@ -36,7 +37,7 @@ /** * A helper to control a {@link Process} running the main Elasticsearch server. * - *

<p> The process can be started by calling {@link #start(Terminal, ProcessInfo, ServerArgs)}. + * <p>
The process can be started by calling {@link #start(Terminal, ProcessInfo, ServerArgs, KeyStoreWrapper)}. * The process is controlled by internally sending arguments and control signals on stdin, * and receiving control signals on stderr. The start method does not return until the * server is ready to process requests and has exited the bootstrap thread. @@ -66,7 +67,8 @@ public class ServerProcess { // this allows mocking the process building by tests interface OptionsBuilder { - List getJvmOptions(Path configDir, Path tmpDir, String envOptions) throws InterruptedException, IOException, UserException; + List getJvmOptions(ServerArgs args, KeyStoreWrapper keyStoreWrapper, Path configDir, Path tmpDir, String envOptions) + throws InterruptedException, IOException, UserException; } // this allows mocking the process building by tests @@ -77,14 +79,16 @@ interface ProcessStarter { /** * Start a server in a new process. * - * @param terminal A terminal to connect the standard inputs and outputs to for the new process. - * @param processInfo Info about the current process, for passing through to the subprocess. - * @param args Arguments to the server process. + * @param terminal A terminal to connect the standard inputs and outputs to for the new process. + * @param processInfo Info about the current process, for passing through to the subprocess. + * @param args Arguments to the server process. + * @param keystore A keystore for accessing secrets. * @return A running server process that is ready for requests * @throws UserException If the process failed during bootstrap */ - public static ServerProcess start(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws UserException { - return start(terminal, processInfo, args, JvmOptionsParser::determineJvmOptions, ProcessBuilder::start); + public static ServerProcess start(Terminal terminal, ProcessInfo processInfo, ServerArgs args, KeyStoreWrapper keystore) + throws UserException { + return start(terminal, processInfo, args, keystore, JvmOptionsParser::determineJvmOptions, ProcessBuilder::start); } // package private so tests can mock options building and process starting @@ -92,6 +96,7 @@ static ServerProcess start( Terminal terminal, ProcessInfo processInfo, ServerArgs args, + KeyStoreWrapper keystore, OptionsBuilder optionsBuilder, ProcessStarter processStarter ) throws UserException { @@ -100,7 +105,7 @@ static ServerProcess start( boolean success = false; try { - jvmProcess = createProcess(processInfo, args.configDir(), optionsBuilder, processStarter); + jvmProcess = createProcess(args, keystore, processInfo, args.configDir(), optionsBuilder, processStarter); errorPump = new ErrorPumpThread(terminal.getErrorWriter(), jvmProcess.getErrorStream()); errorPump.start(); sendArgs(args, jvmProcess.getOutputStream()); @@ -193,6 +198,8 @@ private void sendShutdownMarker() { } private static Process createProcess( + ServerArgs args, + KeyStoreWrapper keystore, ProcessInfo processInfo, Path configDir, OptionsBuilder optionsBuilder, @@ -204,7 +211,7 @@ private static Process createProcess( envVars.put("LIBFFI_TMPDIR", tempDir.toString()); } - List jvmOptions = optionsBuilder.getJvmOptions(configDir, tempDir, envVars.remove("ES_JAVA_OPTS")); + List jvmOptions = optionsBuilder.getJvmOptions(args, keystore, configDir, tempDir, envVars.remove("ES_JAVA_OPTS")); // also pass through distribution type jvmOptions.add("-Des.distribution.type=" + processInfo.sysprops().get("es.distribution.type")); diff --git 
a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java index 2ccdfffe6cb07..7a189563801eb 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java @@ -436,7 +436,7 @@ protected Command loadTool(String toolname, String libs) { } @Override - protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) { + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args, KeyStoreWrapper keystore) { if (argsValidator != null) { argsValidator.accept(args); } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java index 1834245ca7e16..f0fa37227119d 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java @@ -92,7 +92,7 @@ public void resetEnv() { envVars.clear(); esHomeDir = createTempDir(); nodeSettings = Settings.builder(); - optionsBuilder = (configDir, tmpDir, envOptions) -> new ArrayList<>(); + optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> new ArrayList<>(); processValidator = null; mainCallback = null; } @@ -201,7 +201,7 @@ ServerProcess startProcess(boolean daemonize, boolean quiet, String keystorePass process = new MockElasticsearchProcess(); return process; }; - return ServerProcess.start(terminal, pinfo, args, optionsBuilder, starter); + return ServerProcess.start(terminal, pinfo, args, null, optionsBuilder, starter); } public void testProcessBuilder() throws Exception { @@ -253,7 +253,9 @@ public void testStartError() throws Exception { } public void testOptionsBuildingInterrupted() throws Exception { - optionsBuilder = (configDir, tmpDir, envOptions) -> { throw new InterruptedException("interrupted while get jvm options"); }; + optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { + throw new InterruptedException("interrupted while get jvm options"); + }; var e = expectThrows(RuntimeException.class, () -> runForeground()); assertThat(e.getCause().getMessage(), equalTo("interrupted while get jvm options")); } @@ -277,7 +279,7 @@ public void testLibffiEnv() throws Exception { } public void testTempDir() throws Exception { - optionsBuilder = (configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); assertThat(tmpDir.getFileName().toString(), startsWith("elasticsearch-")); return new ArrayList<>(); @@ -289,7 +291,7 @@ public void testTempDirWindows() throws Exception { Path baseTmpDir = createTempDir(); sysprops.put("os.name", "Windows 10"); sysprops.put("java.io.tmpdir", baseTmpDir.toString()); - optionsBuilder = (configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); assertThat(tmpDir.getFileName().toString(), equalTo("elasticsearch")); assertThat(tmpDir.getParent().toString(), equalTo(baseTmpDir.toString())); @@ -301,7 +303,7 @@ public void testTempDirWindows() throws 
Exception { public void testTempDirOverride() throws Exception { Path customTmpDir = createTempDir(); envVars.put("ES_TMPDIR", customTmpDir.toString()); - optionsBuilder = (configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { assertThat(tmpDir.toString(), equalTo(customTmpDir.toString())); return new ArrayList<>(); }; @@ -327,7 +329,7 @@ public void testTempDirOverrideNotADirectory() throws Exception { public void testCustomJvmOptions() throws Exception { envVars.put("ES_JAVA_OPTS", "-Dmyoption=foo"); - optionsBuilder = (configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { assertThat(envOptions, equalTo("-Dmyoption=foo")); return new ArrayList<>(); }; @@ -336,7 +338,7 @@ public void testCustomJvmOptions() throws Exception { } public void testCommandLineSysprops() throws Exception { - optionsBuilder = (configDir, tmpDir, envOptions) -> List.of("-Dfoo1=bar", "-Dfoo2=baz"); + optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> List.of("-Dfoo1=bar", "-Dfoo2=baz"); processValidator = pb -> { assertThat(pb.command(), contains("-Dfoo1=bar")); assertThat(pb.command(), contains("-Dfoo2=bar")); diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java index b6e7596d50c03..637bd09eb2cea 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java @@ -35,7 +35,7 @@ class WindowsServiceDaemon extends EnvironmentAwareCommand { @Override public void execute(Terminal terminal, OptionSet options, Environment env, ProcessInfo processInfo) throws Exception { var args = new ServerArgs(false, true, null, new SecureString(""), env.settings(), env.configFile()); - this.server = ServerProcess.start(terminal, processInfo, args); + this.server = ServerProcess.start(terminal, processInfo, args, null); // start does not return until the server is ready, and we do not wait for the process } diff --git a/docs/changelog/88443.yaml b/docs/changelog/88443.yaml new file mode 100644 index 0000000000000..bdb2a2e8bfce0 --- /dev/null +++ b/docs/changelog/88443.yaml @@ -0,0 +1,6 @@ +pr: 88443 +summary: Provide tracing implementation using OpenTelemetry and APM Java agent +area: Infra/Core +type: feature +issues: + - 84369 diff --git a/modules/apm/build.gradle b/modules/apm/build.gradle new file mode 100644 index 0000000000000..6bb1c544d096e --- /dev/null +++ b/modules/apm/build.gradle @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +apply plugin: 'elasticsearch.internal-es-plugin' + +esplugin { + name 'apm' + description 'Provides APM integration for Elasticsearch' + classname 'org.elasticsearch.tracing.apm.APM' +} + +dependencies { + implementation "io.opentelemetry:opentelemetry-api:1.15.0" + implementation "io.opentelemetry:opentelemetry-context:1.15.0" + implementation "io.opentelemetry:opentelemetry-semconv:1.15.0-alpha" + runtimeOnly "co.elastic.apm:elastic-apm-agent:1.33.0" +} + +tasks.named("dependencyLicenses").configure { + mapping from: /opentelemetry-.*/, to: 'opentelemetry' +} diff --git a/modules/apm/licenses/elastic-apm-agent-1.33.0.jar.sha1 b/modules/apm/licenses/elastic-apm-agent-1.33.0.jar.sha1 new file mode 100644 index 0000000000000..0c57ee857d0ba --- /dev/null +++ b/modules/apm/licenses/elastic-apm-agent-1.33.0.jar.sha1 @@ -0,0 +1 @@ +2a36d1338fde40250a6c0ffe9bfc8cf96a3fd962 \ No newline at end of file diff --git a/modules/apm/licenses/elastic-apm-agent-LICENSE.txt b/modules/apm/licenses/elastic-apm-agent-LICENSE.txt new file mode 100644 index 0000000000000..953a6d21ca18e --- /dev/null +++ b/modules/apm/licenses/elastic-apm-agent-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elastic and contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/apm/licenses/elastic-apm-agent-NOTICE.txt b/modules/apm/licenses/elastic-apm-agent-NOTICE.txt new file mode 100644 index 0000000000000..1e21cb1fb60ec --- /dev/null +++ b/modules/apm/licenses/elastic-apm-agent-NOTICE.txt @@ -0,0 +1,465 @@ +Elastic APM Java Agent +Copyright 2018-2022 Elasticsearch B.V. + +############################################################################### + +This product includes software licensed under the Apache License 2.0 from the +following sources: + - stagemonitor - Copyright 2014-2017 iSYS Software GmbH + - micrometer + - https://github.com/raphw/weak-lock-free + - openzipkin/brave + - LMAX Disruptor - Copyright 2011 LMAX Ltd. 
+ - Byte Buddy (https://bytebuddy.net) - Copyright Rafael Winterhalter + - JCTools + - https://github.com/jvm-profiling-tools/async-profiler + - https://github.com/real-logic/agrona + - Apache Log4j 2 - https://logging.apache.org/log4j/2.x/license.html + +------------------------------------------------------------------------------ +stagemonitor NOTICE + +stagemonitor +Copyright 2014-2017 iSYS Software GmbH + +This product includes software developed at +iSYS Software GmbH (http://www.isys-software.de/). + +This product bundles jQuery treetable 3.2.0, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/jquery-treetable/jquery.treetable.js + +This product bundles Twitter Bootstrap 3.3.2, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/bootstrap/bootstrap.min.css + +This product bundles typeahead.js-bootstrap3.less, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/typeahead.css + +This product bundles typeahead.js, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/typeahead.jquery.min.js + +This product bundles Handlebars 1.3.0, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/handlebars.min.js + +This product bundles jQuery 1.11.1, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/jquery.1.11.1.min.js + +This product bundles jQuery serializeObject, which is available under the "BSD" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/jquery.serialize-object.min.js + +This product bundles Bootstrap Growl 2.0.1, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/bootstrap/bootstrap-growl.min.js + +This product bundles Animate.css, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/animate/animate.min.css + +This product native sigar bindings, which are available under the "Apache 2.0" license. For details, see +stagemonitor-os/src/main/resources/sigar + +This product bundles DataTables 1.10.3, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/datatables/jquery.dataTables.min.js + +This product bundles Flot, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/flot/jquery.flot.min.js + +This product bundles Flot, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/flot/jquery.flot.resize.min.js + +This product bundles Flot stack plugin, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/flot/jquery.flot.stack.original.js + +This product bundles Flot time plugin, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/flot/jquery.flot.time.min.js + +This product bundles Flot tooltip plugin, which is available under the "MIT" license. 
For details, see +stagemonitor-web-servlet/src/main/resources/stagemonitor/static/flot/jquery.flot.tooltip.min.js + +This product bundles weasel, which is available under the "MIT" license. For details, see +stagemonitor-web-servlet/src/main/resources/eum.debug.js + +This product includes code derived from the Metrics project, which is available under the "Apache 2.0" license. For details, see +stagemonitor-web-servlet/src/main/java/org/stagemonitor/web/metrics/StagemonitorMetricsServlet.java + +This product includes code derived from https://github.com/raphw/weak-lock-free/, which is available under the "Apache 2.0" license. For details, see +stagemonitor-core/src/main/java/org/stagemonitor/core/instrument/WeakConcurrentMap.java + +This product includes code derived from https://github.com/prometheus/client_java/blob/master/simpleclient_dropwizard/src/main/java/io/prometheus/client/dropwizard/DropwizardExports.java, which is available under the "Apache 2.0" license. For details, see +stagemonitor-core/src/main/java/org/stagemonitor/core/metrics/prometheus/StagemonitorPrometheusCollector.java + +This product includes code from https://github.com/uber/jaeger-client-java, which is available under the "MIT" license. +stagemonitor/stagemonitor-tracing/src/main/java/org/stagemonitor/tracing/utils/RateLimiter.java + +This product includes code derived from Google Guava, which is available under the "Apache 2.0" license. For details, see +stagemonitor-core/src/main/java/org/stagemonitor/core/util/InetAddresses.java +stagemonitor-core/src/main/java/org/stagemonitor/core/util/Ints.java +stagemonitor-core/src/main/java/org/stagemonitor/core/util/Assert.java + +This product includes code from Spring Framework, which is available under the "Apache 2.0" license. For details, see +stagemonitor-web-servlet/src/main/java/org/stagemonitor/web/servlet/util/AntPathMatcher.java + +------------------------------------------------------------------------------ +async-profiler NOTICE + +async-profiler +Copyright 2018 - 2020 Andrei Pangin + +This product includes software licensed under CDDL 1.0 from the +following sources: + +This product includes a specialized C++ port of the FlameGraph script, licensed under CDDL, available at +https://github.com/brendangregg/FlameGraph/blob/master/flamegraph.pl + +Copyright 2016 Netflix, Inc. +Copyright 2011 Joyent, Inc. All rights reserved. +Copyright 2011 Brendan Gregg. All rights reserved. + +CDDL HEADER START + +The contents of this file are subject to the terms of the +Common Development and Distribution License (the "License"). +You may not use this file except in compliance with the License. + +You can obtain a copy of the license at docs/cddl1.txt or +http://opensource.org/licenses/CDDL-1.0. +See the License for the specific language governing permissions +and limitations under the License. + +When distributing Covered Code, include this CDDL HEADER in each +file and include the License file at docs/cddl1.txt. +If applicable, add the following below this CDDL HEADER, with the +fields enclosed by brackets "[]" replaced with your own identifying +information: Portions Copyright [yyyy] [name of copyright owner] + +CDDL HEADER END + +------------------------------------------------------------------------------ +Apache Log4j NOTICE + +Apache Log4j +Copyright 1999-2021 Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ +ResolverUtil.java +Copyright 2005-2006 Tim Fennell + +Dumbster SMTP test server +Copyright 2004 Jason Paul Kitchen + +TypeUtil.java +Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams + +picocli (http://picocli.info) +Copyright 2017 Remko Popma + +------------------------------------------------------------------------------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +############################################################################### + +This product includes code from https://github.com/ngs-doo/dsl-json, +under The BSD 3-Clause License: + +Copyright (c) 2015, Nova Generacija Softvera d.o.o. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Nova Generacija Softvera d.o.o. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +############################################################################### + +This product includes code from slf4j, under MIT License. +It also includes code that is based on some slf4j interfaces. + +Copyright (c) 2004-2011 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +############################################################################### + +This product includes code from HdrHistogram, dual-licensed under CC0 and BSD 2-Clause License + +The code in this repository code was Written by Gil Tene, Michael Barker, +and Matt Warren, and released to the public domain, as explained at +http://creativecommons.org/publicdomain/zero/1.0/ + +For users of this code who wish to consume it under the "BSD" license +rather than under the public domain or CC0 contribution text mentioned +above, the code found under this directory is *also* provided under the +following license (commonly referred to as the BSD 2-Clause License). This +license does not detract from the above stated release of the code into +the public domain, and simply represents an additional license granted by +the Author. + +----------------------------------------------------------------------------- +** Beginning of "BSD 2-Clause License" text. ** + + Copyright (c) 2012, 2013, 2014, 2015, 2016 Gil Tene + Copyright (c) 2014 Michael Barker + Copyright (c) 2014 Matt Warren + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + +############################################################################### diff --git a/modules/apm/licenses/opentelemetry-LICENSE.txt b/modules/apm/licenses/opentelemetry-LICENSE.txt new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/modules/apm/licenses/opentelemetry-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/modules/apm/licenses/opentelemetry-NOTICE.txt b/modules/apm/licenses/opentelemetry-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/apm/licenses/opentelemetry-api-1.15.0.jar.sha1 b/modules/apm/licenses/opentelemetry-api-1.15.0.jar.sha1 new file mode 100644 index 0000000000000..88ed7a5eba461 --- /dev/null +++ b/modules/apm/licenses/opentelemetry-api-1.15.0.jar.sha1 @@ -0,0 +1 @@ +549bf64119e092bb9917e73601e9ebb508b5a2e3 \ No newline at end of file diff --git a/modules/apm/licenses/opentelemetry-context-1.15.0.jar.sha1 b/modules/apm/licenses/opentelemetry-context-1.15.0.jar.sha1 new file mode 100644 index 0000000000000..1cdac3dd6e654 --- /dev/null +++ b/modules/apm/licenses/opentelemetry-context-1.15.0.jar.sha1 @@ -0,0 +1 @@ +4401a67f7aef7af786012965408ecb5172f19e2f \ No newline at end of file diff --git a/modules/apm/licenses/opentelemetry-semconv-1.15.0-alpha.jar.sha1 b/modules/apm/licenses/opentelemetry-semconv-1.15.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..448fcea25e326 --- /dev/null +++ b/modules/apm/licenses/opentelemetry-semconv-1.15.0-alpha.jar.sha1 @@ -0,0 +1 @@ +ffbb3697b70da736b72bd55ed45005049dc8df54 \ No newline at end of file diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java new file mode 100644 index 0000000000000..3b990c26ba649 --- /dev/null +++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.tracing.apm; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.TracerPlugin; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xcontent.NamedXContentRegistry; + +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +/** + * This module integrates Elastic's APM product with Elasticsearch. Elasticsearch has + * a {@link org.elasticsearch.tracing.Tracer} interface, which this module implements via + * {@link APMTracer}. We use the OpenTelemetry API to capture "spans", and attach the + * Elastic APM Java to ship those spans to an APM server. Although it is possible to + * programmatically attach the agent, the Security Manager permissions required for this + * make this approach difficult to the point of impossibility. + *

+ * All settings are found under the tracing.apm. prefix. Any setting under + * the tracing.apm.agent. prefix will be forwarded on to the APM Java agent + * by setting appropriate system properties. Some settings can only be set once, and must be + * set when the agent starts. We therefore also create and configure a config file in + * the {@code APMJvmOptions} class, which we then delete when Elasticsearch starts, so that + * sensitive settings such as secret_token or api_key are not + * left on disk. + *

+ * When settings are reconfigured using the settings REST API, the new values will again
+ * be passed via system properties to the Java agent, which periodically checks for changes
+ * and applies the new settings values, provided those settings can be dynamically updated.
+ */
+public class APM extends Plugin implements NetworkPlugin, TracerPlugin {
+    private final SetOnce<APMTracer> tracer = new SetOnce<>();
+    private final Settings settings;
+
+    public APM(Settings settings) {
+        this.settings = settings;
+    }
+
+    @Override
+    public Tracer getTracer(Settings settings) {
+        final APMTracer apmTracer = new APMTracer(settings);
+        tracer.set(apmTracer);
+        return apmTracer;
+    }
+
+    @Override
+    public Collection<Object> createComponents(
+        Client client,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ResourceWatcherService resourceWatcherService,
+        ScriptService scriptService,
+        NamedXContentRegistry xContentRegistry,
+        Environment environment,
+        NodeEnvironment nodeEnvironment,
+        NamedWriteableRegistry namedWriteableRegistry,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        Supplier<RepositoriesService> repositoriesServiceSupplier,
+        Tracer unused
+    ) {
+        final APMTracer apmTracer = tracer.get();
+
+        apmTracer.setClusterName(clusterService.getClusterName().value());
+        apmTracer.setNodeName(clusterService.getNodeName());
+
+        final APMAgentSettings apmAgentSettings = new APMAgentSettings();
+        apmAgentSettings.syncAgentSystemProperties(settings);
+        apmAgentSettings.addClusterSettingsListeners(clusterService, apmTracer);
+
+        return List.of(apmTracer);
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return List.of(
+            APMAgentSettings.APM_ENABLED_SETTING,
+            APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING,
+            APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING,
+            APMAgentSettings.APM_AGENT_SETTINGS,
+            APMAgentSettings.APM_SECRET_TOKEN_SETTING,
+            APMAgentSettings.APM_API_KEY_SETTING
+        );
+    }
+}
diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java
new file mode 100644
index 0000000000000..140d48027a59f
--- /dev/null
+++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java
@@ -0,0 +1,156 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.tracing.apm;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.SecureSetting;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.SuppressForbidden;
+
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+
+import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
+import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;
+
+/**
+ * This class is responsible for APM settings, both for Elasticsearch and the APM Java agent.
+ * The methods could all be static, however they are not in order to make unit testing easier.
+ */
+class APMAgentSettings {
+
+    private static final Logger LOGGER = LogManager.getLogger(APMAgentSettings.class);
+
+    /**
+     * Sensible defaults that Elasticsearch configures. This cannot be done via the APM agent
+     * config file, as then their values could not be overridden dynamically via system properties.
+     */
+    // tag::noformat
+    static Map<String, String> APM_AGENT_DEFAULT_SETTINGS = Map.of(
+        "transaction_sample_rate", "0.2"
+    );
+    // end::noformat
+
+    void addClusterSettingsListeners(ClusterService clusterService, APMTracer apmTracer) {
+        final ClusterSettings clusterSettings = clusterService.getClusterSettings();
+        clusterSettings.addSettingsUpdateConsumer(APM_ENABLED_SETTING, enabled -> {
+            apmTracer.setEnabled(enabled);
+            // The agent records data other than spans, e.g. JVM metrics, so we toggle this setting in order to
+            // minimise its impact to a running Elasticsearch.
+            this.setAgentSetting("recording", Boolean.toString(enabled));
+        });
+        clusterSettings.addSettingsUpdateConsumer(APM_TRACING_NAMES_INCLUDE_SETTING, apmTracer::setIncludeNames);
+        clusterSettings.addSettingsUpdateConsumer(APM_TRACING_NAMES_EXCLUDE_SETTING, apmTracer::setExcludeNames);
+        clusterSettings.addAffixMapUpdateConsumer(APM_AGENT_SETTINGS, map -> map.forEach(this::setAgentSetting), (x, y) -> {});
+    }
+
+    /**
+     * Copies APM settings from the provided settings object into the corresponding system properties.
+     * @param settings the settings to apply
+     */
+    void syncAgentSystemProperties(Settings settings) {
+        this.setAgentSetting("recording", Boolean.toString(APM_ENABLED_SETTING.get(settings)));
+
+        // Apply default values for some system properties. Although we configure
+        // the settings in APM_AGENT_DEFAULT_SETTINGS to defer to the default values, they won't
+        // do anything if those settings are never configured.
+        APM_AGENT_DEFAULT_SETTINGS.keySet()
+            .forEach(
+                key -> this.setAgentSetting(key, APM_AGENT_SETTINGS.getConcreteSetting(APM_AGENT_SETTINGS.getKey() + key).get(settings))
+            );
+
+        // Then apply values from the settings in the cluster state
+        APM_AGENT_SETTINGS.getAsMap(settings).forEach(this::setAgentSetting);
+    }
+
+    /**
+     * Copies a setting to the APM agent's system properties under elastic.apm, either
+     * by setting the property if {@code value} has a value, or by deleting the property if it doesn't.
+ * @param key the config key to set, without any prefix + * @param value the value to set, or null + */ + @SuppressForbidden(reason = "Need to be able to manipulate APM agent-related properties to set them dynamically") + void setAgentSetting(String key, String value) { + final String completeKey = "elastic.apm." + Objects.requireNonNull(key); + AccessController.doPrivileged((PrivilegedAction) () -> { + if (value == null || value.isEmpty()) { + LOGGER.trace("Clearing system property [{}]", completeKey); + System.clearProperty(completeKey); + } else { + LOGGER.trace("Setting setting property [{}] to [{}]", completeKey, value); + System.setProperty(completeKey, value); + } + return null; + }); + } + + private static final String APM_SETTING_PREFIX = "tracing.apm."; + + /** + * A list of APM agent config keys that should never be configured by the user. + */ + private static final List PROHIBITED_AGENT_KEYS = List.of( + // ES generates a config file and sets this value + "config_file", + // ES controls this via `tracing.apm.enabled` + "recording" + ); + + static final Setting.AffixSetting APM_AGENT_SETTINGS = Setting.prefixKeySetting( + APM_SETTING_PREFIX + "agent.", + (qualifiedKey) -> { + final String[] parts = qualifiedKey.split("\\."); + final String key = parts[parts.length - 1]; + final String defaultValue = APM_AGENT_DEFAULT_SETTINGS.getOrDefault(key, ""); + return new Setting<>(qualifiedKey, defaultValue, (value) -> { + if (PROHIBITED_AGENT_KEYS.contains(key)) { + throw new IllegalArgumentException("Explicitly configuring [" + qualifiedKey + "] is prohibited"); + } + return value; + }, Setting.Property.NodeScope, Setting.Property.OperatorDynamic); + } + ); + + static final Setting> APM_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting( + APM_SETTING_PREFIX + "names.include", + Collections.emptyList(), + Function.identity(), + OperatorDynamic, + NodeScope + ); + + static final Setting> APM_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting( + APM_SETTING_PREFIX + "names.exclude", + Collections.emptyList(), + Function.identity(), + OperatorDynamic, + NodeScope + ); + + static final Setting APM_ENABLED_SETTING = Setting.boolSetting( + APM_SETTING_PREFIX + "enabled", + false, + OperatorDynamic, + NodeScope + ); + + static final Setting APM_SECRET_TOKEN_SETTING = SecureSetting.secureString(APM_SETTING_PREFIX + "secret_token", null); + + static final Setting APM_API_KEY_SETTING = SecureSetting.secureString(APM_SETTING_PREFIX + "api_key", null); +} diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java new file mode 100644 index 0000000000000..d1d7ce113b344 --- /dev/null +++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java @@ -0,0 +1,394 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.tracing.apm; + +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanBuilder; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.Scope; +import io.opentelemetry.context.propagation.TextMapGetter; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.Operations; +import org.apache.lucene.util.automaton.RegExp; +import org.elasticsearch.Version; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.tasks.Task; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING; +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING; + +/** + * This is an implementation of the {@link org.elasticsearch.tracing.Tracer} interface, which uses + * the OpenTelemetry API to capture spans. + *

+ * This module doesn't provide an implementation of the OTel API. Normally that would mean that the + * API's default, no-op implementation would be used. However, when the APM Java is attached, it + * intercepts the {@link GlobalOpenTelemetry} class and provides its own implementation instead. + */ +public class APMTracer extends AbstractLifecycleComponent implements org.elasticsearch.tracing.Tracer { + + private static final Logger logger = LogManager.getLogger(APMTracer.class); + + /** Holds in-flight span information. */ + private final Map spans = ConcurrentCollections.newConcurrentMap(); + + private volatile boolean enabled; + private volatile APMServices services; + + private List includeNames; + private List excludeNames; + /** Built using {@link #includeNames} and {@link #excludeNames}, and filters out spans based on their name. */ + private volatile CharacterRunAutomaton filterAutomaton; + private String clusterName; + private String nodeName; + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public void setNodeName(String nodeName) { + this.nodeName = nodeName; + } + + /** + * This class is used to make all OpenTelemetry services visible at once + */ + record APMServices(Tracer tracer, OpenTelemetry openTelemetry) {} + + public APMTracer(Settings settings) { + this.includeNames = APM_TRACING_NAMES_INCLUDE_SETTING.get(settings); + this.excludeNames = APM_TRACING_NAMES_EXCLUDE_SETTING.get(settings); + this.filterAutomaton = buildAutomaton(includeNames, excludeNames); + this.enabled = APM_ENABLED_SETTING.get(settings); + } + + void setEnabled(boolean enabled) { + this.enabled = enabled; + if (enabled) { + this.services = createApmServices(); + } else { + destroyApmServices(); + } + } + + void setIncludeNames(List includeNames) { + this.includeNames = includeNames; + this.filterAutomaton = buildAutomaton(includeNames, excludeNames); + } + + void setExcludeNames(List excludeNames) { + this.excludeNames = excludeNames; + this.filterAutomaton = buildAutomaton(includeNames, excludeNames); + } + + @Override + protected void doStart() { + if (enabled) { + this.services = createApmServices(); + } + } + + @Override + protected void doStop() { + destroyApmServices(); + } + + @Override + protected void doClose() {} + + private APMServices createApmServices() { + assert this.enabled; + assert this.services == null; + + return AccessController.doPrivileged((PrivilegedAction) () -> { + var openTelemetry = GlobalOpenTelemetry.get(); + var tracer = openTelemetry.getTracer("elasticsearch", Version.CURRENT.toString()); + + return new APMServices(tracer, openTelemetry); + }); + } + + private void destroyApmServices() { + this.services = null; + this.spans.clear();// discard in-flight spans + } + + @Override + public void startTrace(ThreadContext threadContext, String spanId, String spanName, @Nullable Map attributes) { + assert threadContext != null; + assert spanId != null; + assert spanName != null; + + // If tracing has been disabled, return immediately + var services = this.services; + if (services == null) { + return; + } + + if (filterAutomaton.run(spanName) == false) { + logger.trace("Skipping tracing [{}] [{}] as it has been filtered out", spanId, spanName); + return; + } + + spans.computeIfAbsent(spanId, _spanId -> AccessController.doPrivileged((PrivilegedAction) () -> { + logger.trace("Tracing [{}] [{}]", spanId, spanName); + final SpanBuilder spanBuilder = services.tracer.spanBuilder(spanName); + + // A span can have a parent span, which here 
is modelled though a parent span context. + // Setting this is important for seeing a complete trace in the APM UI. + final Context parentContext = getParentContext(threadContext); + if (parentContext != null) { + spanBuilder.setParent(parentContext); + } + + setSpanAttributes(threadContext, attributes, spanBuilder); + final Span span = spanBuilder.startSpan(); + final Context contextForNewSpan = Context.current().with(span); + + updateThreadContext(threadContext, services, contextForNewSpan); + + return contextForNewSpan; + })); + } + + private static void updateThreadContext(ThreadContext threadContext, APMServices services, Context context) { + // The new span context can be used as the parent context directly within the same Java process... + threadContext.putTransient(Task.APM_TRACE_CONTEXT, context); + + // ...whereas for tasks sent to other ES nodes, we need to put trace HTTP headers into the threadContext so + // that they can be propagated. + services.openTelemetry.getPropagators().getTextMapPropagator().inject(context, threadContext, (tc, key, value) -> { + if (isSupportedContextKey(key)) { + tc.putHeader(key, value); + } + }); + } + + private Context getParentContext(ThreadContext threadContext) { + // https://github.com/open-telemetry/opentelemetry-java/discussions/2884#discussioncomment-381870 + // If you just want to propagate across threads within the same process, you don't need context propagators (extract/inject). + // You can just pass the Context object directly to another thread (it is immutable and thus thread-safe). + + // Attempt to fetch a local parent context first, otherwise look for a remote parent + Context parentContext = threadContext.getTransient("parent_" + Task.APM_TRACE_CONTEXT); + if (parentContext == null) { + final String traceParentHeader = threadContext.getTransient("parent_" + Task.TRACE_PARENT_HTTP_HEADER); + final String traceStateHeader = threadContext.getTransient("parent_" + Task.TRACE_STATE); + + if (traceParentHeader != null) { + final Map traceContextMap = Maps.newMapWithExpectedSize(2); + // traceparent and tracestate should match the keys used by W3CTraceContextPropagator + traceContextMap.put(Task.TRACE_PARENT_HTTP_HEADER, traceParentHeader); + if (traceStateHeader != null) { + traceContextMap.put(Task.TRACE_STATE, traceStateHeader); + } + parentContext = services.openTelemetry.getPropagators() + .getTextMapPropagator() + .extract(Context.current(), traceContextMap, new MapKeyGetter()); + } + } + return parentContext; + } + + /** + * Most of the examples of how to use the OTel API look something like this, where the span context + * is automatically propagated: + * + *

{@code
+     * Span span = tracer.spanBuilder("parent").startSpan();
+     * try (Scope scope = span.makeCurrent()) {
+     *     // ...do some stuff, possibly creating further spans
+     * } finally {
+     *     span.end();
+     * }
+     * }
+ * This typically isn't useful in Elasticsearch, because a {@link Scope} can't be used across threads. + * However, if a scope is active, then the APM agent can capture additional information, so this method + * exists to make it possible to use scopes in the few situation where it makes sense. + * + * @param spanId the ID of a currently-open span for which to open a scope. + * @return a method to close the scope when you are finished with it. + */ + @Override + public Releasable withScope(String spanId) { + final Context context = spans.get(spanId); + if (context != null) { + var scope = context.makeCurrent(); + return scope::close; + } + return () -> {}; + } + + private void setSpanAttributes(ThreadContext threadContext, @Nullable Map spanAttributes, SpanBuilder spanBuilder) { + if (spanAttributes != null) { + for (Map.Entry entry : spanAttributes.entrySet()) { + final String key = entry.getKey(); + final Object value = entry.getValue(); + if (value instanceof String) { + spanBuilder.setAttribute(key, (String) value); + } else if (value instanceof Long) { + spanBuilder.setAttribute(key, (Long) value); + } else if (value instanceof Integer) { + spanBuilder.setAttribute(key, (Integer) value); + } else if (value instanceof Double) { + spanBuilder.setAttribute(key, (Double) value); + } else if (value instanceof Boolean) { + spanBuilder.setAttribute(key, (Boolean) value); + } else { + throw new IllegalArgumentException( + "span attributes do not support value type of [" + value.getClass().getCanonicalName() + "]" + ); + } + } + + final boolean isHttpSpan = spanAttributes.keySet().stream().anyMatch(key -> key.startsWith("http.")); + spanBuilder.setSpanKind(isHttpSpan ? SpanKind.SERVER : SpanKind.INTERNAL); + } else { + spanBuilder.setSpanKind(SpanKind.INTERNAL); + } + + spanBuilder.setAttribute(org.elasticsearch.tracing.Tracer.AttributeKeys.NODE_NAME, nodeName); + spanBuilder.setAttribute(org.elasticsearch.tracing.Tracer.AttributeKeys.CLUSTER_NAME, clusterName); + + final String xOpaqueId = threadContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER); + if (xOpaqueId != null) { + spanBuilder.setAttribute("es.x-opaque-id", xOpaqueId); + } + } + + @Override + public void addError(String spanId, Throwable throwable) { + final var span = Span.fromContextOrNull(spans.get(spanId)); + if (span != null) { + span.recordException(throwable); + } + } + + @Override + public void setAttribute(String spanId, String key, boolean value) { + final var span = Span.fromContextOrNull(spans.get(spanId)); + if (span != null) { + span.setAttribute(key, value); + } + } + + @Override + public void setAttribute(String spanId, String key, double value) { + final var span = Span.fromContextOrNull(spans.get(spanId)); + if (span != null) { + span.setAttribute(key, value); + } + } + + @Override + public void setAttribute(String spanId, String key, long value) { + final var span = Span.fromContextOrNull(spans.get(spanId)); + if (span != null) { + span.setAttribute(key, value); + } + } + + @Override + public void setAttribute(String spanId, String key, String value) { + final var span = Span.fromContextOrNull(spans.get(spanId)); + if (span != null) { + span.setAttribute(key, value); + } + } + + @Override + public void stopTrace(String spanId) { + final var span = Span.fromContextOrNull(spans.remove(spanId)); + if (span != null) { + logger.trace("Finishing trace [{}]", spanId); + span.end(); + } + } + + @Override + public void addEvent(String spanId, String eventName) { + final var span = Span.fromContextOrNull(spans.get(spanId)); + 
if (span != null) { + span.addEvent(eventName); + } + } + + private static class MapKeyGetter implements TextMapGetter> { + + @Override + public Iterable keys(Map carrier) { + return carrier.keySet().stream().filter(APMTracer::isSupportedContextKey).collect(Collectors.toSet()); + } + + @Override + public String get(Map carrier, String key) { + return carrier.get(key); + } + } + + private static boolean isSupportedContextKey(String key) { + return Task.TRACE_PARENT_HTTP_HEADER.equals(key) || Task.TRACE_STATE.equals(key); + } + + // VisibleForTesting + Map getSpans() { + return spans; + } + + private static CharacterRunAutomaton buildAutomaton(List includeNames, List excludeNames) { + Automaton includeAutomaton = patternsToAutomaton(includeNames); + Automaton excludeAutomaton = patternsToAutomaton(excludeNames); + + if (includeAutomaton == null) { + includeAutomaton = Automata.makeAnyString(); + } + + final Automaton finalAutomaton = excludeAutomaton == null + ? includeAutomaton + : Operations.minus(includeAutomaton, excludeAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); + + return new CharacterRunAutomaton(finalAutomaton); + } + + private static Automaton patternsToAutomaton(List patterns) { + final List automata = patterns.stream().map(s -> { + final String regex = s.replaceAll("\\.", "\\\\.").replaceAll("\\*", ".*"); + return new RegExp(regex).toAutomaton(); + }).toList(); + if (automata.isEmpty()) { + return null; + } + if (automata.size() == 1) { + return automata.get(0); + } + return Operations.union(automata); + } +} diff --git a/modules/apm/src/main/plugin-metadata/plugin-security.policy b/modules/apm/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..c2c58659d6f5e --- /dev/null +++ b/modules/apm/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +grant { + permission java.lang.RuntimePermission "accessSystemModules"; + permission java.lang.RuntimePermission "createClassLoader"; + permission java.lang.RuntimePermission "getClassLoader"; + permission java.util.PropertyPermission "elastic.apm.*", "write"; +}; + +grant codeBase "${codebase.elastic-apm-agent}" { + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.RuntimePermission "setContextClassLoader"; + permission java.lang.RuntimePermission "setFactory"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.net.SocketPermission "*", "connect,resolve"; +}; diff --git a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMAgentSettingsTests.java new file mode 100644 index 0000000000000..35328c5dd2461 --- /dev/null +++ b/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMAgentSettingsTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.tracing.apm; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_AGENT_SETTINGS; +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class APMAgentSettingsTests extends ESTestCase { + + /** + * Check that when the tracer is enabled, it also sets the APM agent's recording system property to true. + */ + public void test_whenTracerEnabled_setsRecordingProperty() { + APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + apmAgentSettings.syncAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "true"); + } + + /** + * Check that when the tracer is disabled, it also sets the APM agent's recording system property to false. + */ + public void test_whenTracerDisabled_setsRecordingProperty() { + APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), false).build(); + apmAgentSettings.syncAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "false"); + } + + /** + * Check that when cluster settings are synchronised with the system properties, default values are + * applied. + */ + public void test_whenTracerCreated_defaultSettingsApplied() { + APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + apmAgentSettings.syncAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("transaction_sample_rate", "0.2"); + } + + /** + * Check that when cluster settings are synchronised with the system properties, values in the settings + * are reflected in the system properties, overwriting default values. + */ + public void test_whenTracerCreated_clusterSettingsOverrideDefaults() { + APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); + Settings settings = Settings.builder() + .put(APM_ENABLED_SETTING.getKey(), true) + .put(APM_AGENT_SETTINGS.getKey() + "transaction_sample_rate", "0.75") + .build(); + apmAgentSettings.syncAgentSystemProperties(settings); + + // This happens twice because we first apply the default settings, whose values are overridden + // from the cluster settings, then we apply all the APM-agent related settings, not just the + // ones with default values. Although there is some redundancy here, it only happens at startup + // for a very small number of settings. + verify(apmAgentSettings, times(2)).setAgentSetting("transaction_sample_rate", "0.75"); + } + + /** + * Check that when cluster settings are synchronised with the system properties, agent settings other + * than those with default values are set. 
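+ * (Such settings live under the {@code tracing.apm.agent.} prefix and are forwarded to the agent as
+ * {@code elastic.apm.*} system properties, which is why the module's security policy grants write access
+ * to those properties.)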
+ */ + public void test_whenTracerCreated_clusterSettingsAlsoApplied() { + APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); + Settings settings = Settings.builder() + .put(APM_ENABLED_SETTING.getKey(), true) + .put(APM_AGENT_SETTINGS.getKey() + "span_compression_enabled", "true") + .build(); + apmAgentSettings.syncAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true"); + } +} diff --git a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java b/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java new file mode 100644 index 0000000000000..f4ab36d43aa48 --- /dev/null +++ b/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.tracing.apm; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING; +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +public class APMTracerTests extends ESTestCase { + + /** + * Check that the tracer doesn't create spans when tracing is disabled. + */ + public void test_onTraceStarted_withTracingDisabled_doesNotStartTrace() { + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), false).build(); + APMTracer apmTracer = buildTracer(settings); + + apmTracer.startTrace(new ThreadContext(settings), "id1", "name1", null); + + assertThat(apmTracer.getSpans(), anEmptyMap()); + } + + /** + * Check that the tracer doesn't create spans if a Traceable's span name is filtered out. + */ + public void test_onTraceStarted_withSpanNameOmitted_doesNotStartTrace() { + Settings settings = Settings.builder() + .put(APM_ENABLED_SETTING.getKey(), true) + .putList(APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), List.of("filtered*")) + .build(); + APMTracer apmTracer = buildTracer(settings); + + apmTracer.startTrace(new ThreadContext(settings), "id1", "name1", null); + + assertThat(apmTracer.getSpans(), anEmptyMap()); + } + + /** + * Check that when a trace is started, the tracer starts a span and records it. + */ + public void test_onTraceStarted_startsTrace() { + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + APMTracer apmTracer = buildTracer(settings); + + apmTracer.startTrace(new ThreadContext(settings), "id1", "name1", null); + + assertThat(apmTracer.getSpans(), aMapWithSize(1)); + assertThat(apmTracer.getSpans(), hasKey("id1")); + } + + /** + * Check that when a trace is started, the tracer ends the span and removes the record of it. 
+ */ + public void test_onTraceStopped_stopsTrace() { + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + APMTracer apmTracer = buildTracer(settings); + + apmTracer.startTrace(new ThreadContext(settings), "id1", "name1", null); + apmTracer.stopTrace("id1"); + + assertThat(apmTracer.getSpans(), anEmptyMap()); + } + + /** + * Check that when a trace is started, then the thread context is updated with tracing information. + *

+ * We expect the APM agent to inject the {@link Task#TRACE_PARENT_HTTP_HEADER} and {@link Task#TRACE_STATE} + * headers into the context, and it does, but this doesn't happen in the unit tests. We can + * check that the local context object is added, however. + */ + public void test_whenTraceStarted_threadContextIsPopulated() { + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + APMTracer apmTracer = buildTracer(settings); + + ThreadContext threadContext = new ThreadContext(settings); + apmTracer.startTrace(threadContext, "id1", "name1", null); + assertThat(threadContext.getTransient(Task.APM_TRACE_CONTEXT), notNullValue()); + } + + /** + * Check that when a tracer has a list of include names configured, then those + * names are used to filter spans. + */ + public void test_whenTraceStarted_andSpanNameIncluded_thenSpanIsStarted() { + final List includePatterns = List.of( + // exact name + "name-aaa", + // regex + "name-b*" + ); + Settings settings = Settings.builder() + .put(APM_ENABLED_SETTING.getKey(), true) + .putList(APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), includePatterns) + .build(); + APMTracer apmTracer = buildTracer(settings); + + apmTracer.startTrace(new ThreadContext(settings), "id1", "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), "id2", "name-bbb", null); + apmTracer.startTrace(new ThreadContext(settings), "id3", "name-ccc", null); + + assertThat(apmTracer.getSpans(), hasKey("id1")); + assertThat(apmTracer.getSpans(), hasKey("id2")); + assertThat(apmTracer.getSpans(), not(hasKey("id3"))); + } + + /** + * Check that when a tracer has a list of include and exclude names configured, and + * a span matches both, then the exclude filters take precedence. + */ + public void test_whenTraceStarted_andSpanNameIncludedAndExcluded_thenSpanIsNotStarted() { + final List includePatterns = List.of("name-a*"); + final List excludePatterns = List.of("name-a*"); + Settings settings = Settings.builder() + .put(APM_ENABLED_SETTING.getKey(), true) + .putList(APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), includePatterns) + .putList(APM_TRACING_NAMES_EXCLUDE_SETTING.getKey(), excludePatterns) + .build(); + APMTracer apmTracer = buildTracer(settings); + + apmTracer.startTrace(new ThreadContext(settings), "id1", "name-aaa", null); + + assertThat(apmTracer.getSpans(), not(hasKey("id1"))); + } + + /** + * Check that when a tracer has a list of exclude names configured, then those + * names are used to filter spans. 
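+ * (In these patterns {@code .} is matched literally and {@code *} matches any sequence of characters,
+ * so {@code name-b*} matches {@code name-bbb} in the test below.)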
+ */ + public void test_whenTraceStarted_andSpanNameExcluded_thenSpanIsNotStarted() { + final List excludePatterns = List.of( + // exact name + "name-aaa", + // regex + "name-b*" + ); + Settings settings = Settings.builder() + .put(APM_ENABLED_SETTING.getKey(), true) + .putList(APM_TRACING_NAMES_EXCLUDE_SETTING.getKey(), excludePatterns) + .build(); + APMTracer apmTracer = buildTracer(settings); + + apmTracer.startTrace(new ThreadContext(settings), "id1", "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), "id2", "name-bbb", null); + apmTracer.startTrace(new ThreadContext(settings), "id3", "name-ccc", null); + + assertThat(apmTracer.getSpans(), not(hasKey("id1"))); + assertThat(apmTracer.getSpans(), not(hasKey("id2"))); + assertThat(apmTracer.getSpans(), hasKey("id3")); + } + + private APMTracer buildTracer(Settings settings) { + APMTracer tracer = new APMTracer(settings); + tracer.doStart(); + return tracer; + } +} diff --git a/qa/apm/build.gradle b/qa/apm/build.gradle new file mode 100644 index 0000000000000..2750f24572b44 --- /dev/null +++ b/qa/apm/build.gradle @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import org.elasticsearch.gradle.Architecture +import org.elasticsearch.gradle.VersionProperties +import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER; + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.test.fixtures' +apply plugin: 'elasticsearch.internal-distribution-download' + +testFixtures.useFixture() + +dependencies { + testImplementation project(':client:rest-high-level') +} + +dockerCompose { + environment.put 'STACK_VERSION', VersionProperties.elasticsearch + // retainContainersOnStartupFailure = true +} + +elasticsearch_distributions { + docker { + type = DOCKER + architecture = Architecture.current() + version = VersionProperties.getElasticsearch() + failIfUnavailable = false // This ensures we skip this testing if Docker is unavailable + } +} + +tasks.named("preProcessFixture").configure { + dependsOn elasticsearch_distributions.matching { it.architecture == Architecture.current() } +} + +tasks.register("integTest", Test) { + outputs.doNotCacheIf('Build cache is disabled for Docker tests') { true } + maxParallelForks = '1' + include '**/*IT.class' +} + +tasks.named("check").configure { + dependsOn "integTest" +} diff --git a/qa/apm/config/elasticsearch/roles.yml b/qa/apm/config/elasticsearch/roles.yml new file mode 100644 index 0000000000000..91277fa8dd65d --- /dev/null +++ b/qa/apm/config/elasticsearch/roles.yml @@ -0,0 +1,34 @@ +--- +apm_server: + cluster: ['manage_ilm', 'manage_security', 'manage_api_key'] + indices: + - names: ['apm-*', 'logs-apm*', 'metrics-apm*', 'traces-apm*'] + privileges: ['write', 'create_index', 'manage', 'manage_ilm'] + applications: + - application: 'apm' + privileges: ['sourcemap:write', 'event:write', 'config_agent:read'] + resources: '*' +beats: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm', 'manage_security', 'manage_api_key'] + indices: + - names: ['filebeat-*', 'shrink-filebeat-*'] + privileges: ['all'] +filebeat: + cluster: ['manage_index_templates', 'monitor', 
'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['filebeat-*', 'shrink-filebeat-*'] + privileges: ['all'] +heartbeat: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['heartbeat-*', 'shrink-heartbeat-*'] + privileges: ['all'] +metricbeat: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['metricbeat-*', 'shrink-metricbeat-*'] + privileges: ['all'] +opbeans: + indices: + - names: ['opbeans-*'] + privileges: ['write', 'read'] diff --git a/qa/apm/config/elasticsearch/service_tokens b/qa/apm/config/elasticsearch/service_tokens new file mode 100644 index 0000000000000..02c39a69bc9bf --- /dev/null +++ b/qa/apm/config/elasticsearch/service_tokens @@ -0,0 +1,2 @@ +elastic/fleet-server/elastic-package-fleet-server-token:{PBKDF2_STRETCH}10000$PNiVyY96dHwRfoDszBvYPAz+mSLbC+NhtPh63dblDZU=$dAY1tXX1U5rXB+2Lt7m0L2LUNSb1q5nRaIqPNZTBxb8= +elastic/kibana/elastic-package-kibana-token:{PBKDF2_STRETCH}10000$wIEFHIIIZ2ap0D0iQsyw0MfB7YuFA1bHnXAmlCoL4Gg=$YxvIJnasjLZyDQZpmFBiJHdR/CGXd5BnVm013Jty6p0= diff --git a/qa/apm/config/elasticsearch/users b/qa/apm/config/elasticsearch/users new file mode 100644 index 0000000000000..4cc30a99d92f1 --- /dev/null +++ b/qa/apm/config/elasticsearch/users @@ -0,0 +1,9 @@ +admin:$2a$10$xiY0ZzOKmDDN1p3if4t4muUBwh2.bFHADoMRAWQgSClm4ZJ4132Y. +apm_server_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG +apm_user_ro:$2a$10$hQfy2o2u33SapUClsx8NCuRMpQyHP9b2l4t3QqrBA.5xXN2S.nT4u +beats_user:$2a$10$LRpKi4/Q3Qo4oIbiu26rH.FNIL4aOH4aj2Kwi58FkMo1z9FgJONn2 +filebeat_user:$2a$10$sFxIEX8tKyOYgsbJLbUhTup76ssvSD3L4T0H6Raaxg4ewuNr.lUFC +heartbeat_user:$2a$10$nKUGDr/V5ClfliglJhfy8.oEkjrDtklGQfhd9r9NoFqQeoNxr7uUK +kibana_system_user:$2a$10$nN6sRtQl2KX9Gn8kV/.NpOLSk6Jwn8TehEDnZ7aaAgzyl/dy5PYzW +metricbeat_user:$2a$10$5PyTd121U2ZXnFk9NyqxPuLxdptKbB8nK5egt6M5/4xrKUkk.GReG +opbeans_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG diff --git a/qa/apm/config/elasticsearch/users_roles b/qa/apm/config/elasticsearch/users_roles new file mode 100644 index 0000000000000..629fe7392c12f --- /dev/null +++ b/qa/apm/config/elasticsearch/users_roles @@ -0,0 +1,13 @@ +apm_server:apm_server_user +apm_system:apm_server_user +apm_user:apm_server_user,apm_user_ro +beats:beats_user +beats_system:beats_user,filebeat_user,heartbeat_user,metricbeat_user +filebeat:filebeat_user +heartbeat:heartbeat_user +ingest_admin:apm_server_user +kibana_system:kibana_system_user +kibana_user:apm_server_user,apm_user_ro,beats_user,filebeat_user,heartbeat_user,metricbeat_user,opbeans_user +metricbeat:metricbeat_user +opbeans:opbeans_user +superuser:admin diff --git a/qa/apm/config/kibana/kibana-8.yml b/qa/apm/config/kibana/kibana-8.yml new file mode 100644 index 0000000000000..4b3add76282d8 --- /dev/null +++ b/qa/apm/config/kibana/kibana-8.yml @@ -0,0 +1,78 @@ +xpack.fleet.packages: + - name: system + version: latest + - name: elastic_agent + version: latest + - name: apm + version: latest + - name: fleet_server + version: latest + +xpack.fleet.agentPolicies: + - name: Fleet Server + APM policy + id: fleet-server-apm-policy + description: Fleet server policy with APM and System logs and metrics enabled + namespace: default + is_default_fleet_server: true + is_managed: false + monitoring_enabled: + - logs + - metrics + package_policies: + - name: system-1 + package: + name: system + - name: apm-1 + package: + name: apm + inputs: + - type: apm + keep_enabled: true + vars: 
+ - name: host + value: 0.0.0.0:8200 + frozen: true + - name: url + value: "${ELASTIC_APM_SERVER_URL}" + frozen: true + - name: enable_rum + value: true + frozen: true + - name: read_timeout + value: 1m + frozen: true + - name: shutdown_timeout + value: 2m + frozen: true + - name: write_timeout + value: 1m + frozen: true + - name: rum_allow_headers + value: + - x-custom-header + frozen: true + - name: secret_token + value: "${ELASTIC_APM_SECRET_TOKEN}" + frozen: true + - name: tls_enabled + value: ${ELASTIC_APM_TLS} + frozen: true + - name: tls_certificate + value: /usr/share/apmserver/config/certs/tls.crt + frozen: true + - name: tls_key + value: /usr/share/apmserver/config/certs/tls.key + frozen: true + - name: Fleet Server + package: + name: fleet_server + inputs: + - type: fleet-server + keep_enabled: true + vars: + - name: host + value: 0.0.0.0 + frozen: true + - name: port + value: 8220 + frozen: true diff --git a/qa/apm/docker-compose.yml b/qa/apm/docker-compose.yml new file mode 100644 index 0000000000000..b107788b2fb36 --- /dev/null +++ b/qa/apm/docker-compose.yml @@ -0,0 +1,154 @@ +version: "2.4" + +networks: + default: + name: apm-integration-testing + +services: + apmserver: + depends_on: + kibana: + condition: service_healthy + environment: + FLEET_ELASTICSEARCH_HOST: null + FLEET_SERVER_ELASTICSEARCH_INSECURE: "1" + FLEET_SERVER_ENABLE: "1" + FLEET_SERVER_HOST: 0.0.0.0 + FLEET_SERVER_INSECURE_HTTP: "1" + FLEET_SERVER_POLICY_ID: fleet-server-apm-policy + FLEET_SERVER_PORT: "8220" + FLEET_SERVER_SERVICE_TOKEN: AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL2VsYXN0aWMtcGFja2FnZS1mbGVldC1zZXJ2ZXItdG9rZW46bmgtcFhoQzRRQ2FXbms2U0JySGlWQQ + KIBANA_FLEET_HOST: null + KIBANA_FLEET_SERVICE_TOKEN: AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL2VsYXN0aWMtcGFja2FnZS1mbGVldC1zZXJ2ZXItdG9rZW46bmgtcFhoQzRRQ2FXbms2U0JySGlWQQ + KIBANA_FLEET_SETUP: "1" + healthcheck: + test: /bin/true + image: docker.elastic.co/beats/elastic-agent:${STACK_VERSION} + labels: + - co.elastic.apm.stack-version=${STACK_VERSION} + logging: + driver: json-file + options: + max-file: "5" + max-size: 2m + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./scripts/tls/apmserver/cert.crt:/usr/share/apmserver/config/certs/tls.crt + - ./scripts/tls/apmserver/key.pem:/usr/share/apmserver/config/certs/tls.key + + elasticsearch: + environment: + - action.destructive_requires_name=false + - bootstrap.memory_lock=true + - cluster.name=docker-cluster + - cluster.routing.allocation.disk.threshold_enabled=false + - discovery.type=single-node + - ES_JAVA_OPTS=-Xms1g -Xmx1g + - indices.id_field_data.enabled=true + - ingest.geoip.downloader.enabled=false + - path.repo=/usr/share/elasticsearch/data/backups + - xpack.license.self_generated.type=trial + - xpack.monitoring.collection.enabled=true + - xpack.security.authc.anonymous.roles=remote_monitoring_collector + - xpack.security.authc.api_key.enabled=true + - xpack.security.authc.realms.file.file1.order=0 + - xpack.security.authc.realms.native.native1.order=1 + - xpack.security.authc.token.enabled=true + - xpack.security.enabled=true + # APM specific settings. 
We don't configure `secret_key` because Kibana is configured with a blank key + - tracing.apm.enabled=true + - tracing.apm.agent.server_url=http://apmserver:8200 + # Send traces to APM server aggressively + - tracing.apm.agent.metrics_interval=1s + # Record everything + - tracing.apm.agent.transaction_sample_rate=1 + - tracing.apm.agent.log_level=debug + healthcheck: + interval: 20s + retries: 10 + test: curl -s -k http://localhost:9200/_cluster/health | grep -vq '"status":"red"' + image: elasticsearch:test + labels: + - co.elastic.apm.stack-version=${STACK_VERSION} + - co.elastic.metrics/module=elasticsearch + - co.elastic.metrics/metricsets=node,node_stats + - co.elastic.metrics/hosts=http://$${data.host}:9200 + logging: + driver: json-file + options: + max-file: "5" + max-size: 2m + ports: + # - 127.0.0.1:9200:9200 + - "9200" + ulimits: + memlock: + hard: -1 + soft: -1 + volumes: + - ./config/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml + - ./config/elasticsearch/users:/usr/share/elasticsearch/config/users + - ./config/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles + - ./config/elasticsearch/service_tokens:/usr/share/elasticsearch/config/service_tokens + + kibana: + depends_on: + elasticsearch: + condition: service_healthy + environment: + ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + ELASTICSEARCH_PASSWORD: changeme + ELASTICSEARCH_USERNAME: kibana_system_user + ELASTIC_APM_SECRET_TOKEN: "" + ELASTIC_APM_SERVER_URL: http://apmserver:8200 + ELASTIC_APM_TLS: "false" + SERVER_HOST: 0.0.0.0 + SERVER_NAME: kibana.example.org + STATUS_ALLOWANONYMOUS: "true" + TELEMETRY_ENABLED: "false" + XPACK_APM_SERVICEMAPENABLED: "true" + XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr + XPACK_FLEET_AGENTS_ELASTICSEARCH_HOSTS: '["http://elasticsearch:9200"]' + # XPACK_FLEET_REGISTRYURL: https://epr-snapshot.elastic.co + XPACK_MONITORING_ENABLED: "true" + XPACK_REPORTING_ROLES_ENABLED: "false" + XPACK_SECURITY_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr + XPACK_SECURITY_LOGINASSISTANCEMESSAGE: Login details: `admin/changeme`. Further details [here](https://github.com/elastic/apm-integration-testing#logging-in). 
+ XPACK_SECURITY_SESSION_IDLETIMEOUT: 1M + XPACK_SECURITY_SESSION_LIFESPAN: 3M + XPACK_XPACK_MAIN_TELEMETRY_ENABLED: "false" + healthcheck: + interval: 10s + retries: 30 + start_period: 10s + test: curl -s -k http://kibana:5601/api/status | grep -q 'All services are available' + image: docker.elastic.co/kibana/kibana:${STACK_VERSION} + labels: + - co.elastic.apm.stack-version=${STACK_VERSION} + logging: + driver: json-file + options: + max-file: "5" + max-size: 2m + # ports: + # - 127.0.0.1:5601:5601 + volumes: + - ./config/kibana/kibana-8.yml:/usr/share/kibana/config/kibana.yml + + # Rather than mess aroud with threads in the test, just run `curl` in a + # loop to generate traces with a known path + tracegenerator: + depends_on: + apmserver: + condition: service_healthy + elasticsearch: + condition: service_healthy + kibana: + condition: service_healthy + # Official curl image + image: curlimages/curl + command: /bin/sh -c "while true; do curl -s -k -u admin:changeme http://elasticsearch:9200/_nodes/stats >/dev/null ; sleep 3; done" + +volumes: + esdata: + driver: local diff --git a/qa/apm/scripts/tls/apm-server/cert.crt b/qa/apm/scripts/tls/apm-server/cert.crt new file mode 100644 index 0000000000000..b2f9aa7b5d230 --- /dev/null +++ b/qa/apm/scripts/tls/apm-server/cert.crt @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEpjCCAo4CCQDR9oXvJbopHjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDDAph +cG0tc2VydmVyMB4XDTE5MTExOTE1MjE0NVoXDTI5MTExNjE1MjE0NVowFTETMBEG +A1UEAwwKYXBtLXNlcnZlcjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ANduj3tyeBIHj0Bf5aKMRImhRbkAaQ2p6T0WsHKlicd1P4/D5l783+vVsbwprRqR +qXAUsUWcUSYJXBX1qtC2MtKqi4xYUTAyQV5dgrMoCV+vtZY31SK4kolumd1vVMh+ +po+IwueLvLMFK1tQGIXlJblSDYVauIt5rp79IIhWOY/YpcQy9RaxykljTYTbPjLW +m3T92bow1nLh5GL3ThJEAkLO+hkJv9716+YRWYtPcojiGzpLjFgF50MoP4Lilm9U +r2tBnqpvb2PwE1kkly8DDBtcg+HM4tgGsbdWo2Pgp82ARV4DL+JlNJ+SVQZAmTbc +3LMwxnUJtuKMeh2rwb9HOyuONXfF1PiEzyDhAlabyS6toAGy1mlMAop1ClO1wV5O +Ayy47TeD6ziNyMKB7/XHdW4rb16K6j6EV27Bg2ZK6Vrfkwm3aRbpztfVRMX+HMUp +ktH+V2OwJoP7l7lzw/q8yMdopG57zRJa1dx8NWP/UKi8Ej+87DYyWJODiNHD7PM7 +9vfd47lNcWxw+p7ntEpnn6EeW2r7SlmfhtdIxL2DiTiKAq9Ktyi9cFnGnDfSDJST +T1G1vIDdG33Vt2Y5+wqzCGbYyMsAOaMdXZSeniXXFR4GX7iz+AGoKojBbmoo9VqP +mvbudNU+ysha4IJvTfOczJZgstxCXG+MXbEXFSgysImFAgMBAAEwDQYJKoZIhvcN +AQELBQADggIBAFh2YxRT6PaAXDq38rm25I91fCP9PzVPDuIkn9wl85e7avuh6FZi +R0nQG6+lB1i8XSm9UMl9+ISjE+EQqry6KB6mDsakGOsDuEUdZiw3sGJIUWQkQArB +ym5DqxKpeZBeVHBxnrEbQBV8s0j8uxd7X1E0ImfMKbKfNr/B5qPRXkREvydLWYvq +8yMcUPu1MiZFUgAGr9Py39kW3lbRPWZii/2bN8AB9h6gAhq5TiennfgJZsRiuSta +w/TmOcAuz4e/KPIzfvL/YCWbLyJ2vrIQeOc4N7jZfqMmLKgYCRyjI7+amfuyKPBW +J4psfJ0ssHdTxAUK65vghJ2s6FLvU3HoxzetZsJp5kj6CKYaFYkB4NkkYnlY8MP/ +T68oOmdYwwwrcBmDtZwoppRb5zhev5k3aykgZ/B/vqVJE9oIPkp/7wqEP1WqSiUe +AgyQBu8UN4ho2Rf6nZezZ4cjW/0WyhGOHQBFmwPI2MBGsQxF2PF4lKkJtaywIEm7 +4UsEQYK7Hf2J2OccWGvfo5HZ5tsSbuOGAf0bfHfaBQBsvzWet+TO6XX9VrWjnAKl +bH+mInmnd9v2oABFl9Djv/Cw+lEAxxkCTW+DcwdEFJREPab5xhQDEpQQ/Ef0ihvg +/ZtJQeoOYfrLN6K726QmoRWxvqxLyWK3gztcO1svHqr/cMt3ooLJEaqU +-----END CERTIFICATE----- diff --git a/qa/apm/scripts/tls/apm-server/key.pem b/qa/apm/scripts/tls/apm-server/key.pem new file mode 100644 index 0000000000000..31208905f7d78 --- /dev/null +++ b/qa/apm/scripts/tls/apm-server/key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDXbo97cngSB49A +X+WijESJoUW5AGkNqek9FrBypYnHdT+Pw+Ze/N/r1bG8Ka0akalwFLFFnFEmCVwV +9arQtjLSqouMWFEwMkFeXYKzKAlfr7WWN9UiuJKJbpndb1TIfqaPiMLni7yzBStb +UBiF5SW5Ug2FWriLea6e/SCIVjmP2KXEMvUWscpJY02E2z4y1pt0/dm6MNZy4eRi 
+904SRAJCzvoZCb/e9evmEVmLT3KI4hs6S4xYBedDKD+C4pZvVK9rQZ6qb29j8BNZ +JJcvAwwbXIPhzOLYBrG3VqNj4KfNgEVeAy/iZTSfklUGQJk23NyzMMZ1CbbijHod +q8G/RzsrjjV3xdT4hM8g4QJWm8kuraABstZpTAKKdQpTtcFeTgMsuO03g+s4jcjC +ge/1x3VuK29eiuo+hFduwYNmSula35MJt2kW6c7X1UTF/hzFKZLR/ldjsCaD+5e5 +c8P6vMjHaKRue80SWtXcfDVj/1CovBI/vOw2MliTg4jRw+zzO/b33eO5TXFscPqe +57RKZ5+hHltq+0pZn4bXSMS9g4k4igKvSrcovXBZxpw30gyUk09RtbyA3Rt91bdm +OfsKswhm2MjLADmjHV2Unp4l1xUeBl+4s/gBqCqIwW5qKPVaj5r27nTVPsrIWuCC +b03znMyWYLLcQlxvjF2xFxUoMrCJhQIDAQABAoICAQCfClIGsoUN2mLZBXLDw4W9 +jT+pyjHEEpHLtXphyO+kPlzER71Elq7AriveW24d1TcfNUeBulr2F6bR12FZX4i5 +mYoX/AND73Xusl4Q4Re6ej82PNWuIlCcAPi6Trxqn4VbJX2t7q1KBCDz8neIMZjd +7UNqFYV0Akr1uK1RuUYZebk21N+29139O8A4upp6cZCml9kq6W8HtNgkb6pFNcvt +gluELHxnn2mdmWVfwTEu+K1dJfTf7svB+m6Ys6qXWg9+wRzfehDj2JKQFsE9xaQk +dvItulIlZRvB28YXr/xxa6bKNtQc8NYej6sRSJNTu017RCDeumM3cLmeOfR4v59f +tkMWnFcA3ykmsaK2FiQyX+MoWvs5vdT7/yNIfz3a4MErcWg8z3FDbffKfbhgsb+2 +z4Ub6fIRKZykW2ajN7t0378bMmJ3rPT66QF40aNNeWasF3EHcwekDPpsHIBJoY4G +9aG6uTUmRkC+NGeP9HroxkvDo2NbXn8XGOEJS64rwsME3CsUi1A5ZY0XLTxYptH6 +X2TfC5oTmnsYB/wWqo26bTJc0bwDOueQWYap0aVtv3f/0tzueKepCbxdeG4ikA0U +2t3F+OUmoCZ5D0p+6zLvrTUPhPCFEynp+vGUvmbwozYi0NWzFyFqlvqRG1KLIVLG +ZRyTMYuZ/cWkv1SJYbEcaQKCAQEA/9HaJg2YACv7rx6/FesE/81u16OYTaahHngW +4M+5rT0+fNKYH/fYkwavQ/Gr6FSTls7F+8K9DVwoGLZRQ3t6epCXqGqX0uaY+iSH +O8eezXVnHzUaVE4KlwJY9xZ+K1iIf5zUb5hpaQI0jKS/igcxFAsutWiyenrz8eQp +MAycZmzkQMLbUsa1t6y0VaEaC4YMHyQ9ag2eMfqbG27plFQbYxllHXowGMFXPheY +xACwo5V5tJUgRP+HlrI4rf0vadMgVIKxVSUiqIzGREIkYrTAshFjkpHR5/R8s/kH +Xm8q2gdoJltBFJzA2B8MHXVi7mYDBlUmBoRKhzkl/TSray9j7wKCAQEA15VsNQZu +cZluboz/R4EDbEm1po2UBcNNiu/fgJ8BDUkLzJESIITY41fgvBbTun1fiuGeE+El +0o1w4hQhIiV1KAB44w69fJR0VELfMZiIcd8kd0sDgPPVrd1MzzKPZ9yg4mbEkCCO +V/EoTi8Ut27sMcl8059qm1qq7I5pzHwSziNa087m+5VdfmvJZJVipudngZ3QmRgU +KKcBhgFFSkncYezoq2XQfRcqkk0sORxDvsMmRInyHZh0l9zv46ihgTvErlCHtizV +V4HNO4OPz7FxUZ04iWSGZs4snu1cW2j+lbKuOkADveBYVmCcdZ3R0SH+A5skL0zG +tm6z0TNP/kFlywKCAQEA+lTdFu2od0qTADujG4yemL7rn2J8EEhlU86J/LXo6UiM +FFNz/5xltwIMkf00jqXswt9WR9W5cBBlQEFwZgu3v6YscebU6NE0k1sZZnshv8YK +AjTRrfusSzdF3YyKLFp3QAE0tHs9cz9wMsyojiYZdZa3v1dTh503h9YQI+/DQEuA +VIsZWfgPLEx5L231cZ9bz0GEQ3pN+nRUQdUYB0kCf8gC9YRy+lZ/y8gFeo9+SqVj +sj1XlY1DnkiKRGAEfJbYBTra0woCz1LqVTMwLdLY2adAe9XrxQKu4OJovpUkJrSm +yxnzJnt6DkLbdRxAki8K+LBsBGaCE67tqMhYkguOywKCAQAslEl77YiJFSEw2xcu +wg7jJZrahgxF5Mz0HgYporek96Xo91a4QsBWwqVGP7IoriRDo8P8eGJJ19Wv6lmv +pe9EBlT5HuMwD8K+adWde907Ltlrkad30vQsr8ZiUiI1Z/oc1wNuikzlAolDIZk3 +FUjiQrf9SsnQtj8CC7D1B/MbjVQK2I4LGCftLHzIv9tWiCNvOiMYhVIl1eMKwtiB +NCTOWx8B0lv6gf/boPm0FZQsrk4LfjsCw7PYc2dnvEcpYiKZqS1nDn5PShgWZm4m +lJrKNairQI5KU/gGJS8j9+ItMnW0tegQK4QY2IGCENCCXnUYacxhu46byuiEKggw +m3VhAoIBAQCQa90StsZHqZ+J83do3kpvD+O5nURPnckznC2WJgraW49k5vltnJTT +zkFTqHMLfmYwAz1o15sPCqlkMD+fEUzg6Hpzxm7dOUppkf5KFbD7AnsYU9U8LamJ +HaET7Dq5TpjG7uoaHZZjs7cCHcWu2E8nIezyAtZ+rbTg/qW7bYMAlJTkerznGuDU +v0hNzCr/81o5rbX0UhetcmKVOprUSWzfrw5ElLhAtzM7zivbZSnsOny8pC33FtQ5 +iQbVcNGUjfFCM95ZipxxN9z0FwxpJ1paCPGYA86u2olWl/VnVPqEj7WYzO8H5W2q +aXpWH6HVf6B10pQrWWwUAAHyqYS5bZkQ +-----END PRIVATE KEY----- diff --git a/qa/apm/src/test/java/org/elasticsearch/tracing/apm/ApmIT.java b/qa/apm/src/test/java/org/elasticsearch/tracing/apm/ApmIT.java new file mode 100644 index 0000000000000..1ac741235b94e --- /dev/null +++ b/qa/apm/src/test/java/org/elasticsearch/tracing/apm/ApmIT.java @@ -0,0 +1,209 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.tracing.apm; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; + +/** + * Tests around Elasticsearch's tracing support using APM. + */ +public class ApmIT extends ESRestTestCase { + + private static final String DATA_STREAM = "traces-apm-default"; + + /** + * Check that if we send HTTP traffic to Elasticsearch, then traces are captured in APM server. The traces are generated in + * a separate Docker container, which continually fetches `/_nodes/stats`. We check for the following: + *

+ * <ul>
+ *     <li>A transaction for the REST API call</li>
+ *     <li>A span for the task started by the REST call</li>
+ *     <li>A child span started by the above span</li>
+ * </ul>
+ *
This proves that the hierarchy of spans is being correctly captured. + */ + public void testCapturesTracesForHttpTraffic() throws Exception { + checkTracesDataStream(); + + assertTracesExist(); + } + + private void checkTracesDataStream() throws Exception { + assertBusy(() -> { + final Response response = performRequestTolerantly(new Request("GET", "/_data_stream/" + DATA_STREAM)); + assertOK(response); + }, 1, TimeUnit.MINUTES); + } + + private void assertTracesExist() throws Exception { + // First look for a transaction for the REST calls that we make via the `tracegenerator` Docker container + + final AtomicReference transactionId = new AtomicReference<>(); + assertBusy(() -> { + final Request tracesSearchRequest = new Request("GET", "/" + DATA_STREAM + "/_search"); + tracesSearchRequest.setJsonEntity(""" + { + "query": { + "match": { "transaction.name": "GET /_nodes/stats" } + } + }"""); + final Response tracesSearchResponse = performRequestTolerantly(tracesSearchRequest); + assertOK(tracesSearchResponse); + + final List> documents = getDocuments(tracesSearchResponse); + assertThat(documents, not(empty())); + + final Map tx = documents.get(0); + + check(tx, "http.request.method", "GET"); + check(tx, "http.response.status_code", 200); + check(tx, "labels.es_cluster_name", "docker-cluster"); + check(tx, "labels.http_request_headers_authorization", "[REDACTED]"); + check(tx, "span.kind", "SERVER"); + check(tx, "transaction.result", "HTTP 2xx"); + check(tx, "url.path", "/_nodes/stats"); + + final String txId = pluck(tx, "transaction.id"); + transactionId.set(txId); + }, 1, TimeUnit.MINUTES); + + // Then look for the task that the REST call starts + + final AtomicReference monitorNodeStatsSpanId = new AtomicReference<>(); + assertBusy(() -> { + final List> documents = searchByParentId(transactionId.get()); + assertThat(documents, not(empty())); + + final Map spansByName = documents.stream().collect(Collectors.toMap(d -> pluck(d, "span.name"), d -> d)); + + assertThat(spansByName, hasKey("cluster:monitor/nodes/stats")); + + @SuppressWarnings("unchecked") + final Map span = (Map) spansByName.get("cluster:monitor/nodes/stats"); + check(span, "span.kind", "INTERNAL"); + + final String spanId = pluck(span, "span.id"); + monitorNodeStatsSpanId.set(spanId); + }, 1, TimeUnit.MINUTES); + + // Finally look for the child task that the task above started + + assertBusy(() -> { + final List> documents = searchByParentId(monitorNodeStatsSpanId.get()); + assertThat(documents, not(empty())); + + final Map spansByName = documents.stream().collect(Collectors.toMap(d -> pluck(d, "span.name"), d -> d)); + + assertThat(spansByName, hasKey("cluster:monitor/nodes/stats[n]")); + }, 1, TimeUnit.MINUTES); + } + + @SuppressWarnings("unchecked") + private T pluck(Map map, String path) { + String[] parts = path.split("\\."); + + Object result = map; + + for (String part : parts) { + result = ((Map) result).get(part); + } + + return (T) result; + } + + private List> searchByParentId(String parentId) throws IOException { + final Request searchRequest = new Request("GET", "/" + DATA_STREAM + "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "match": { "parent.id": "%s" } + } + }""".formatted(parentId)); + final Response response = performRequestTolerantly(searchRequest); + assertOK(response); + + return getDocuments(response); + } + + /** + * We don't need to clean up the cluster, particularly as we have Kibana and APM server using ES as well as our test, so declare + * that we need to preserve the cluster 
in order to prevent the usual cleanup logic from running (and inevitably failing). + */ + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + /** + * Turns exceptions into assertion failures so that {@link #assertBusy(CheckedRunnable)} can still retry. + */ + private Response performRequestTolerantly(Request request) { + try { + return client().performRequest(request); + } catch (Exception e) { + throw new AssertionError(e); + } + } + + /** + * Customizes the client settings to use the same username / password that is configured in Docke.r + */ + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("admin", new SecureString("changeme".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + /** + * Constructs the correct cluster address by looking up the dynamic port that Elasticsearch is exposed on. + */ + @Override + protected String getTestRestCluster() { + return "localhost:" + getProperty("test.fixtures.elasticsearch.tcp.9200"); + } + + @SuppressWarnings("unchecked") + private List> getDocuments(Response response) throws IOException { + final Map stringObjectMap = ESRestTestCase.entityAsMap(response); + return (List>) XContentMapValues.extractValue("hits.hits._source", stringObjectMap); + } + + private String getProperty(String key) { + String value = System.getProperty(key); + if (value == null) { + throw new IllegalStateException( + "Could not find system properties from test.fixtures. " + + "This test expects to run with the elasticsearch.test.fixtures Gradle plugin" + ); + } + return value; + } + + private void check(Map doc, String path, T expected) { + assertThat(pluck(doc, path), equalTo(expected)); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index d3c6525432bd1..9b6ec86a65a51 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -485,6 +485,7 @@ public ActionModule( actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()), Stream.of( new RestHeaderDefinition(Task.X_OPAQUE_ID_HTTP_HEADER, false), + new RestHeaderDefinition(Task.TRACE_STATE, false), new RestHeaderDefinition(Task.TRACE_PARENT_HTTP_HEADER, false), new RestHeaderDefinition(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, false) ) diff --git a/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java b/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java index 040342885680b..5fcbb9b64ca65 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java @@ -184,7 +184,8 @@ public boolean test(Permission permission) { new RuntimePermission("createClassLoader"), new RuntimePermission("getFileStoreAttributes"), new RuntimePermission("accessUserInformation"), - new AuthPermission("modifyPrivateCredentials") + new AuthPermission("modifyPrivateCredentials"), + new RuntimePermission("accessSystemModules") ); PermissionCollection modulePermissionCollection = new Permissions(); namedPermissions.forEach(modulePermissionCollection::add); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 7da6ce409debb..e0fb308e4d9b7 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ 
b/server/src/main/java/org/elasticsearch/node/Node.java @@ -87,7 +87,9 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; @@ -397,6 +399,8 @@ protected Node( ); } + deleteTemporaryApmConfig(jvmInfo); + this.pluginsService = pluginServiceCtor.apply(tmpSettings); final Settings settings = mergePluginSettings(pluginsService.pluginMap(), tmpSettings); @@ -422,7 +426,9 @@ protected Node( Task.HEADERS_TO_COPY.stream() ).collect(Collectors.toSet()); - final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders); + final Tracer tracer = getTracer(pluginsService, settings); + + final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders, tracer); // register the node.data, node.ingest, node.master, node.remote_cluster_client settings here so we can mark them private final List> additionalSettings = new ArrayList<>(pluginsService.flatMap(Plugin::getSettings).toList()); @@ -691,8 +697,6 @@ protected Node( shardLimitValidator ); - final Tracer tracer = getTracer(pluginsService, clusterService, settings); - Collection pluginComponents = pluginsService.flatMap( p -> p.createComponents( client, @@ -1101,14 +1105,45 @@ protected Node( } } - private Tracer getTracer(PluginsService pluginsService, ClusterService clusterService, Settings settings) { + /** + * If the JVM was started with the Elastic APM agent and a config file argument was specified, then + * delete the config file. The agent only reads it once, when supplied in this fashion, and it + * may contain a secret token. + */ + @SuppressForbidden(reason = "Cannot guarantee that the temp config path is relative to the environment") + private void deleteTemporaryApmConfig(JvmInfo jvmInfo) { + for (String inputArgument : jvmInfo.getInputArguments()) { + if (inputArgument.startsWith("-javaagent:")) { + final String agentArg = inputArgument.substring(11); + final String[] parts = agentArg.split("=", 2); + if (parts[0].matches("modules/x-pack-apm-integration/elastic-apm-agent-\\d+\\.\\d+\\.\\d+\\.jar")) { + if (parts.length == 2 && parts[1].startsWith("c=")) { + final Path apmConfig = PathUtils.get(parts[1].substring(2)); + if (apmConfig.getFileName().toString().matches("^\\.elstcapm\\..*\\.tmp")) { + try { + Files.deleteIfExists(apmConfig); + } catch (IOException e) { + logger.error( + "Failed to delete temporary APM config file [" + apmConfig + "], reason: [" + e.getMessage() + "]", + e + ); + } + } + } + return; + } + } + } + } + + private Tracer getTracer(PluginsService pluginsService, Settings settings) { final List tracerPlugins = pluginsService.filterPlugins(TracerPlugin.class); if (tracerPlugins.size() > 1) { throw new IllegalStateException("A single TracerPlugin was expected but got: " + tracerPlugins); } - return tracerPlugins.isEmpty() ? Tracer.NOOP : tracerPlugins.get(0).getTracer(clusterService, settings); + return tracerPlugins.isEmpty() ? 
Tracer.NOOP : tracerPlugins.get(0).getTracer(settings); } private HealthService createHealthService( diff --git a/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java b/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java index 2a6d5d778ba89..3e5cddc28e3b0 100644 --- a/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java @@ -8,10 +8,9 @@ package org.elasticsearch.plugins; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tracing.Tracer; public interface TracerPlugin { - Tracer getTracer(ClusterService clusterService, Settings settings); + Tracer getTracer(Settings settings); } diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy index f9b37f65538f2..568b07ea9fa16 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -71,6 +71,10 @@ grant codeBase "${codebase.jna}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; +grant codeBase "${codebase.log4j-api}" { + permission java.lang.RuntimePermission "getClassLoader"; +}; + //// Everything else: grant { diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 69632e51176f0..b8b193bf4cbb4 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -60,6 +60,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; @@ -128,22 +129,6 @@ public void testApplyRelevantHeaders() throws Exception { restHeaders.put("header.2", Collections.singletonList("true")); restHeaders.put("header.3", Collections.singletonList("false")); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - final RestController spyRestController = spy(restController); - when(spyRestController.getAllHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<>() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public MethodHandlers next() { - return new MethodHandlers("/").addMethod(GET, RestApiVersion.current(), (request, channel, client) -> { - assertEquals("true", threadContext.getHeader("header.1")); - assertEquals("true", threadContext.getHeader("header.2")); - assertNull(threadContext.getHeader("header.3")); - }); - } - }); AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); // the rest controller relies on the caller to stash the context, so we should expect these values here as we didn't stash the @@ -204,39 +189,26 @@ public MethodHandlers next() { } /** - * Check that dispatching a request causes relevant trace headers to be put into the thread context. + * Check that the REST controller picks up and propagates W3C trace context headers via the {@link ThreadContext}. 
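+ * A {@code traceparent} header has the form {@code version-traceid-parentid-flags}; only the 32-character
+ * trace id part is copied into the {@code Task.TRACE_ID} header, while the complete value is kept as a
+ * transient for the tracer to pick up.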
+ * @see Trace Context - W3C Recommendation */ public void testTraceParentAndTraceId() { final ThreadContext threadContext = client.threadPool().getThreadContext(); Set headers = Set.of(new RestHeaderDefinition(Task.TRACE_PARENT_HTTP_HEADER, false)); final RestController restController = new RestController(headers, null, null, circuitBreakerService, usageService, tracer); Map> restHeaders = new HashMap<>(); - restHeaders.put( - Task.TRACE_PARENT_HTTP_HEADER, - Collections.singletonList("00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01") - ); + final String traceParentValue = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"; + restHeaders.put(Task.TRACE_PARENT_HTTP_HEADER, Collections.singletonList(traceParentValue)); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - final RestController spyRestController = spy(restController); - when(spyRestController.getAllHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<>() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public MethodHandlers next() { - return new MethodHandlers("/").addMethod(GET, RestApiVersion.current(), (request, channel, client) -> { - assertEquals("0af7651916cd43dd8448eb211c80319c", threadContext.getHeader(Task.TRACE_ID)); - assertNull(threadContext.getHeader(Task.TRACE_PARENT_HTTP_HEADER)); - }); - } - }); AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + restController.dispatchRequest(fakeRequest, channel, threadContext); + // the rest controller relies on the caller to stash the context, so we should expect these values here as we didn't stash the // context in this test - assertEquals("0af7651916cd43dd8448eb211c80319c", threadContext.getHeader(Task.TRACE_ID)); - assertNull(threadContext.getHeader(Task.TRACE_PARENT_HTTP_HEADER)); + assertThat(threadContext.getHeader(Task.TRACE_ID), equalTo("0af7651916cd43dd8448eb211c80319c")); + assertThat(threadContext.getHeader(Task.TRACE_PARENT_HTTP_HEADER), nullValue()); + assertThat(threadContext.getTransient("parent_" + Task.TRACE_PARENT_HTTP_HEADER), equalTo(traceParentValue)); } public void testRequestWithDisallowedMultiValuedHeaderButSameValues() { @@ -985,7 +957,6 @@ RestResponse getRestResponse() { boolean getSendResponseCalled() { return getRestResponse() != null; } - } private static final class ExceptionThrowingChannel extends AbstractRestChannel { From 740bcde590ddcb77a59b9acd7c5ccfeca7fa2bd1 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 3 Aug 2022 08:27:01 -0500 Subject: [PATCH 084/265] Fixing a race condition in CoordinationDiagnosticsServiceIT #89055 This makes sure that the test cluster is stable in CoordinationDiagnosticsServiceIT::testBlockClusterStateProcessingOnOneNode before proceeding with the rest of test. 
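For reference, the shape of the guard is sketched below. The sketch is illustrative only and is not part of
the patch; the class and test names in it are invented. `ensureStableCluster` waits until the cluster health
reports the expected number of nodes, i.e. until the election has finished and every node has joined, so
picking a node that is not the elected master afterwards cannot race with an in-flight election.

    import java.util.Arrays;

    import org.elasticsearch.test.ESIntegTestCase;

    public class StableClusterGuardIT extends ESIntegTestCase { // invented name, illustration only
        public void testSomethingOnANonMasterNode() throws Exception {
            internalCluster().startNodes(3);
            // Wait until all 3 nodes have joined and agree on the cluster state, i.e. the election has finished.
            ensureStableCluster(3);
            String master = internalCluster().getMasterName();
            // Only safe after ensureStableCluster: every node now agrees on which node is the master.
            String blockedNode = Arrays.stream(internalCluster().getNodeNames())
                .filter(name -> name.equals(master) == false)
                .findAny()
                .orElseThrow();
            // ... block cluster state processing on blockedNode and run the assertions ...
        }
    }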
--- .../cluster/coordination/CoordinationDiagnosticsServiceIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java index 8819023d8d47d..66346aae64dca 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -35,7 +35,6 @@ private void setBootstrapMasterNodeIndex() { internalCluster().setBootstrapMasterNodeIndex(0); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89015") public void testBlockClusterStateProcessingOnOneNode() throws Exception { /* * This test picks a node that is not elected master, and then blocks cluster state processing on it. The reason is so that we @@ -48,6 +47,7 @@ public void testBlockClusterStateProcessingOnOneNode() throws Exception { assertThat(nodeNames, hasItem(master)); String blockedNode = nodeNames.stream().filter(n -> n.equals(master) == false).findAny().get(); assertNotNull(blockedNode); + ensureStableCluster(3); DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, master).state().nodes(); Set nodesWithoutBlockedNode = discoveryNodes.getNodes() From c741b8c531b91688df07fc31cae73fa386faac9a Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> Date: Wed, 3 Aug 2022 17:46:01 +0300 Subject: [PATCH 085/265] Fix failing test `RollupActionSingleNodeTests` `testCannotRollupToExistingIndex` (#89025) The root cause of this failure was that test testCannotRollupWhileOtherRollupInProgress would finish before the asynchronously submitted rollup action had not completed. In this case the test would finish and delete the rollup index, while the rollup process was still trying to populate or replicate it. It looks like using the ActionListener.NOOP was not a good choice. Fixes https://github.com/elastic/elasticsearch/issues/88844 --- .../v2/RollupActionSingleNodeTests.java | 165 ++++++++++-------- 1 file changed, 94 insertions(+), 71 deletions(-) diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java index 6e4f7dbf19433..721c60876b730 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java @@ -94,9 +94,11 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static org.elasticsearch.index.mapper.TimeSeriesParams.TIME_SERIES_METRIC_PARAM; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; @@ -163,57 +165,59 @@ public void setup() { * check that the value of the label (last value) matches the value * of the corresponding metric which uses a last_value metric type. 
*/ - client().admin() - .indices() - .prepareCreate(sourceIndex) - .setSettings( - Settings.builder() - .put("index.number_of_shards", numOfShards) - .put("index.number_of_replicas", numOfReplicas) - .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) - .put( - IndexSettings.TIME_SERIES_START_TIME.getKey(), - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) - ) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z") - .build() - ) - .setMapping( - FIELD_TIMESTAMP, - "type=date", - FIELD_DIMENSION_1, - "type=keyword,time_series_dimension=true", - FIELD_DIMENSION_2, - "type=long,time_series_dimension=true", - FIELD_NUMERIC_1, - "type=long,time_series_metric=gauge", - FIELD_NUMERIC_2, - "type=double,time_series_metric=counter", - FIELD_LABEL_DOUBLE, - "type=double", - FIELD_LABEL_INTEGER, - "type=integer", - FIELD_LABEL_KEYWORD, - "type=keyword", - FIELD_LABEL_TEXT, - "type=text", - FIELD_LABEL_BOOLEAN, - "type=boolean", - FIELD_METRIC_LABEL_DOUBLE, /* numeric label indexed as a metric */ - "type=double,time_series_metric=counter", - FIELD_LABEL_IPv4_ADDRESS, - "type=ip", - FIELD_LABEL_IPv6_ADDRESS, - "type=ip", - FIELD_LABEL_DATE, - "type=date,format=date_optional_time", - FIELD_LABEL_KEYWORD_ARRAY, - "type=keyword", - FIELD_LABEL_DOUBLE_ARRAY, - "type=double" - ) - .get(); + assertAcked( + client().admin() + .indices() + .prepareCreate(sourceIndex) + .setSettings( + Settings.builder() + .put("index.number_of_shards", numOfShards) + .put("index.number_of_replicas", numOfReplicas) + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) + .put( + IndexSettings.TIME_SERIES_START_TIME.getKey(), + DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) + ) + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z") + .build() + ) + .setMapping( + FIELD_TIMESTAMP, + "type=date", + FIELD_DIMENSION_1, + "type=keyword,time_series_dimension=true", + FIELD_DIMENSION_2, + "type=long,time_series_dimension=true", + FIELD_NUMERIC_1, + "type=long,time_series_metric=gauge", + FIELD_NUMERIC_2, + "type=double,time_series_metric=counter", + FIELD_LABEL_DOUBLE, + "type=double", + FIELD_LABEL_INTEGER, + "type=integer", + FIELD_LABEL_KEYWORD, + "type=keyword", + FIELD_LABEL_TEXT, + "type=text", + FIELD_LABEL_BOOLEAN, + "type=boolean", + FIELD_METRIC_LABEL_DOUBLE, /* numeric label indexed as a metric */ + "type=double,time_series_metric=counter", + FIELD_LABEL_IPv4_ADDRESS, + "type=ip", + FIELD_LABEL_IPv6_ADDRESS, + "type=ip", + FIELD_LABEL_DATE, + "type=date,format=date_optional_time", + FIELD_LABEL_KEYWORD_ARRAY, + "type=keyword", + FIELD_LABEL_DOUBLE_ARRAY, + "type=double" + ) + .get() + ); } public void testRollupIndex() throws IOException { @@ -285,8 +289,7 @@ public void testCopyIndexSettings() throws IOException { logger.info("Updating index [{}] with settings [{}]", sourceIndex, settings); var updateSettingsReq = new UpdateSettingsRequest(settings, sourceIndex); - var r = client().admin().indices().updateSettings(updateSettingsReq).actionGet(); - assertTrue("Update settings not acked", r.isAcknowledged()); + assertAcked(client().admin().indices().updateSettings(updateSettingsReq).actionGet()); RollupActionConfig config = new RollupActionConfig(randomInterval()); SourceSupplier sourceSupplier 
= () -> { @@ -361,7 +364,13 @@ public void testCannotRollupToExistingIndex() throws Exception { prepareSourceIndex(sourceIndex); // Create an empty index with the same name as the rollup index - client().admin().indices().prepareCreate(rollupIndex).get(); + assertAcked( + client().admin() + .indices() + .prepareCreate(rollupIndex) + .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()) + .get() + ); ResourceAlreadyExistsException exception = expectThrows( ResourceAlreadyExistsException.class, () -> rollup(sourceIndex, rollupIndex, config) @@ -433,12 +442,31 @@ public void testCannotRollupWhileOtherRollupInProgress() throws Exception { .endObject(); bulkIndex(sourceSupplier); prepareSourceIndex(sourceIndex); - client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config), ActionListener.noop()); + var rollupListener = new ActionListener() { + boolean success; + + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + if (acknowledgedResponse.isAcknowledged()) { + success = true; + } else { + fail("Failed to receive rollup acknowledgement"); + } + } + + @Override + public void onFailure(Exception e) { + fail("Rollup failed: " + e.getMessage()); + } + }; + client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config), rollupListener); ResourceAlreadyExistsException exception = expectThrows( ResourceAlreadyExistsException.class, () -> rollup(sourceIndex, rollupIndex, config) ); assertThat(exception.getMessage(), containsString(rollupIndex)); + // We must wait until the in-progress rollup ends, otherwise data will not be cleaned up + assertBusy(() -> assertTrue("In progress rollup did not complete", rollupListener.success), 60, TimeUnit.SECONDS); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/88800") @@ -521,23 +549,22 @@ private void bulkIndex(String indexName, SourceSupplier sourceSupplier) throws I private void prepareSourceIndex(String sourceIndex) { // Set the source index to read-only state - AcknowledgedResponse r = client().admin() - .indices() - .prepareUpdateSettings(sourceIndex) - .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) - .get(); - assertTrue(r.isAcknowledged()); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(sourceIndex) + .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) + .get() + ); } private void rollup(String sourceIndex, String rollupIndex, RollupActionConfig config) { - AcknowledgedResponse response = client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config)) - .actionGet(); - assertTrue(response.isAcknowledged()); + assertAcked(client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config)).actionGet()); } private RolloverResponse rollover(String dataStreamName) throws ExecutionException, InterruptedException { RolloverResponse response = client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).get(); - assertTrue(response.isAcknowledged()); + assertAcked(response); return response; } @@ -887,12 +914,8 @@ private String createDataStream() throws Exception { ); PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(dataStreamName + "_template") .indexTemplate(template); - AcknowledgedResponse response = 
client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); - - assertTrue(response.isAcknowledged()); - assertTrue( - client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName)).get().isAcknowledged() - ); + assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet()); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName)).get()); return dataStreamName; } } From 942e5fd9fca2e49e70c2b6b3f4e21884615368c1 Mon Sep 17 00:00:00 2001 From: Leaf-Lin <39002973+Leaf-Lin@users.noreply.github.com> Date: Thu, 4 Aug 2022 01:00:34 +1000 Subject: [PATCH 086/265] Adding specific items into troubleshooting guide (#88105) * Update troubleshooting.asciidoc Adding items into the troubleshooting guide * Resolve conflicts * Reorganizes troubleshooting links Co-authored-by: Abdon Pijpelink --- docs/reference/troubleshooting.asciidoc | 58 +++++++++++++++---- .../diagnose-unassigned-shards.asciidoc | 0 .../fix-common-cluster-issues.asciidoc | 11 +++- 3 files changed, 56 insertions(+), 13 deletions(-) rename docs/reference/troubleshooting/{data => common-issues}/diagnose-unassigned-shards.asciidoc (100%) diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index 5f1db93f1a8dd..25e57dcc8ed99 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -6,9 +6,45 @@ This section provides a series of troubleshooting solutions aimed at helping users fix problems that an {es} deployment might encounter. -Several troubleshooting issues can be diagnosed using the +[discrete] +[[troubleshooting-general]] +=== General +* <> +* Several troubleshooting issues can be diagnosed using the <>. +[discrete] +[[troubleshooting-data]] +=== Data +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +[discrete] +[[troubleshooting-management]] +=== Management +* <> +* <> + +[discrete] +[[troubleshooting-snapshot]] +=== Snapshot and restore +* <> +* <> + +[discrete] +[[troubleshooting-others]] +=== Others +* <> +* <> +* <> +* <> + If none of these solutions relate to your issue, you can still get help: * For users with an active subscription, you can get help in several ways: @@ -36,23 +72,21 @@ the experts in the community, including people from Elastic. 
include::troubleshooting/fix-common-cluster-issues.asciidoc[] -include::troubleshooting/data/increase-shard-limit.asciidoc[] +include::troubleshooting/data/add-tier.asciidoc[] -include::troubleshooting/data/increase-cluster-shard-limit.asciidoc[] +include::troubleshooting/data/enable-cluster-allocation.asciidoc[] include::troubleshooting/data/enable-index-allocation.asciidoc[] -include::troubleshooting/data/enable-cluster-allocation.asciidoc[] - include::troubleshooting/data/data-tiers-mixed-with-node-attr.asciidoc[] -include::troubleshooting/data/add-tier.asciidoc[] +include::troubleshooting/data/increase-tier-capacity.asciidoc[] -include::troubleshooting/data/diagnose-unassigned-shards.asciidoc[] +include::troubleshooting/data/increase-shard-limit.asciidoc[] -include::troubleshooting/discovery-issues.asciidoc[] +include::troubleshooting/data/increase-cluster-shard-limit.asciidoc[] -include::troubleshooting/data/increase-tier-capacity.asciidoc[] +include::troubleshooting/corruption-issues.asciidoc[] include::troubleshooting/data/start-ilm.asciidoc[] @@ -62,10 +96,10 @@ include::troubleshooting/data/restore-from-snapshot.asciidoc[] include::troubleshooting/snapshot/add-repository.asciidoc[] +include::troubleshooting/discovery-issues.asciidoc[] + include::monitoring/troubleshooting.asciidoc[] include::transform/troubleshooting.asciidoc[leveloffset=+1] -include::../../x-pack/docs/en/watcher/troubleshooting.asciidoc[] - -include::troubleshooting/corruption-issues.asciidoc[] +include::../../x-pack/docs/en/watcher/troubleshooting.asciidoc[] \ No newline at end of file diff --git a/docs/reference/troubleshooting/data/diagnose-unassigned-shards.asciidoc b/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc similarity index 100% rename from docs/reference/troubleshooting/data/diagnose-unassigned-shards.asciidoc rename to docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc diff --git a/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc b/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc index 7433e25a43947..15876012376c2 100644 --- a/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc +++ b/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc @@ -32,10 +32,19 @@ When {es} rejects a request, it stops the operation and returns an error with a A backlogged task queue can prevent tasks from completing and put the cluster into an unhealthy state. +<>:: +There are multiple reasons why shards might get unassigned, ranging from +misconfigured allocation settings to lack of disk space. + +<>:: +A cluster in which nodes leave unexpectedly is unstable and can create several +issues. 
+ include::common-issues/disk-usage-exceeded.asciidoc[] include::common-issues/circuit-breaker-errors.asciidoc[] include::common-issues/high-cpu-usage.asciidoc[] include::common-issues/high-jvm-memory-pressure.asciidoc[] include::common-issues/red-yellow-cluster-status.asciidoc[] include::common-issues/rejected-requests.asciidoc[] -include::common-issues/task-queue-backlog.asciidoc[] \ No newline at end of file +include::common-issues/task-queue-backlog.asciidoc[] +include::common-issues/diagnose-unassigned-shards.asciidoc[] \ No newline at end of file From 4e1a0631e860f20a116a2879a0dfe562bece9879 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcin=20S=C5=82owiak?= Date: Wed, 3 Aug 2022 17:21:35 +0200 Subject: [PATCH 087/265] Adjust logging message for adding index block (#85237) --- .../cluster/metadata/MetadataIndexStateService.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index 35255e066f751..9d8777da6c733 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -433,7 +433,11 @@ private static Tuple> addIndexBlock( } } - logger.info("adding block {} to indices {}", block.name, blockedIndices.keySet().stream().map(Object::toString).toList()); + logger.info( + "adding [index.blocks.{}] block to indices {}", + block.name, + blockedIndices.keySet().stream().map(Object::toString).toList() + ); return Tuple.tuple(ClusterState.builder(currentState).blocks(blocks).metadata(metadata).build(), blockedIndices); } @@ -1051,7 +1055,7 @@ private static Tuple> finalizeBlock( logger.debug("index {} has been deleted since blocking it started, ignoring", index); } } - logger.info("completed adding block {} to indices {}", block.name, effectivelyBlockedIndices); + logger.info("completed adding [index.blocks.{}] block to indices {}", block.name, effectivelyBlockedIndices); return Tuple.tuple(ClusterState.builder(currentState).blocks(blocks).build(), List.copyOf(blockingResults.values())); } From 77aa8c03e12c8bb511fd00f580921119fc69086c Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 3 Aug 2022 18:55:05 +0300 Subject: [PATCH 088/265] [ML] Include start params in _stats for non-started model deployments (#89091) Adds the missing start parameters to the _stats API response for non-started deployments. 
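For illustration, a sketch of the `deployment_stats` fragment that a still-starting deployment is now expected to report. The field names follow the assertions in the test added below; the exact response layout and the example values are assumptions rather than output copied from a running cluster:

[source,console-result]
----
"deployment_stats": {
  "number_of_allocations": 4,
  "threads_per_allocation": 2,
  "queue_capacity": 500,
  "cache_size": "100kb",
  "allocation_status": {
    "state": "starting"
  }
}
----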
--- docs/changelog/89091.yaml | 5 ++ .../xpack/ml/integration/PyTorchModelIT.java | 65 +++++++++++++++++++ .../TransportGetDeploymentStatsAction.java | 12 +++- 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/89091.yaml diff --git a/docs/changelog/89091.yaml b/docs/changelog/89091.yaml new file mode 100644 index 0000000000000..463c794db8295 --- /dev/null +++ b/docs/changelog/89091.yaml @@ -0,0 +1,5 @@ +pr: 89091 +summary: Include start params in `_stats` for non-started model deployments +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 9498b58bb5b22..b965e75e91c5c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -813,6 +813,71 @@ public void testStartDeployment_TooManyAllocations() throws IOException { assertThat(EntityUtils.toString(response.getEntity()), not(containsString("deployment_stats"))); } + @SuppressWarnings("unchecked") + public void testStartDeployment_GivenNoProcessorsLeft_AndLazyStartEnabled() throws Exception { + // We start 2 models. The first needs so many allocations it won't possibly + // get them all. This would leave no space to allocate the second model at all. + + // Enable lazy starting so that the deployments start even if they cannot get fully allocated. + // The setting is cleared in the cleanup method of these tests. 
+ Request loggingSettings = new Request("PUT", "_cluster/settings"); + loggingSettings.setJsonEntity(""" + {"persistent" : { + "xpack.ml.max_lazy_ml_nodes": 5 + }}"""); + client().performRequest(loggingSettings); + + String modelId1 = "model_1"; + createTrainedModel(modelId1); + putModelDefinition(modelId1); + putVocabulary(List.of("these", "are", "my", "words"), modelId1); + + String modelId2 = "model_2"; + createTrainedModel(modelId2); + putModelDefinition(modelId2); + putVocabulary(List.of("these", "are", "my", "words"), modelId2); + + startDeployment(modelId1, AllocationStatus.State.STARTED.toString(), 100, 1); + + { + Request request = new Request( + "POST", + "/_ml/trained_models/" + + modelId2 + + "/deployment/_start?timeout=40s&wait_for=starting&" + + "number_of_allocations=4&threads_per_allocation=2&queue_capacity=500&cache_size=100Kb" + ); + client().performRequest(request); + } + + // Check second model did not get any allocations + assertAllocationCount(modelId2, 0); + + // Verify stats shows model is starting and deployment settings are present + { + Response statsResponse = getTrainedModelStats(modelId2); + var responseMap = entityAsMap(statsResponse); + List> stats = (List>) responseMap.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + String statusState = (String) XContentMapValues.extractValue("deployment_stats.allocation_status.state", stats.get(0)); + assertThat(statusState, equalTo("starting")); + int numberOfAllocations = (int) XContentMapValues.extractValue("deployment_stats.number_of_allocations", stats.get(0)); + assertThat(numberOfAllocations, equalTo(4)); + int threadsPerAllocation = (int) XContentMapValues.extractValue("deployment_stats.threads_per_allocation", stats.get(0)); + assertThat(threadsPerAllocation, equalTo(2)); + int queueCapacity = (int) XContentMapValues.extractValue("deployment_stats.queue_capacity", stats.get(0)); + assertThat(queueCapacity, equalTo(500)); + ByteSizeValue cacheSize = ByteSizeValue.parseBytesSizeValue( + (String) XContentMapValues.extractValue("deployment_stats.cache_size", stats.get(0)), + "cache_size)" + ); + assertThat(cacheSize, equalTo(ByteSizeValue.ofKb(100))); + } + + stopDeployment(modelId1); + stopDeployment(modelId2); + } + @SuppressWarnings("unchecked") private void assertAllocationCount(String modelId, int expectedAllocationCount) throws IOException { Response response = getTrainedModelStats(modelId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 0154fd6d7d5ba..d9d7f9a5a7150 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -269,7 +269,17 @@ static GetDeploymentStatsAction.Response addFailedRoutes( nodeStats.sort(Comparator.comparing(n -> n.getNode().getId())); - updatedAssignmentStats.add(new AssignmentStats(modelId, null, null, null, null, assignment.getStartTime(), nodeStats)); + updatedAssignmentStats.add( + new AssignmentStats( + modelId, + assignment.getTaskParams().getThreadsPerAllocation(), + assignment.getTaskParams().getNumberOfAllocations(), + assignment.getTaskParams().getQueueCapacity(), + assignment.getTaskParams().getCacheSize().orElse(null), + assignment.getStartTime(), + nodeStats + ) + ); } } From 
8c21d03f7aab7a24a31471d46b7321161bff4bcc Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 3 Aug 2022 17:09:26 +0100 Subject: [PATCH 089/265] [ML] Move PyTorch request ID and cache hit indicator to top level (#88901) This change will facilitate a performance improvement on the C++ side. The request ID and cache hit indicator are the parts that need to be changed when the C++ process responds to an inference request. Having them at the top level means we do not need to parse and manipulate the original response - we can simply cache the inner object of the response and add the outer fields around it when serializing it. Companion to elastic/ml-cpp#2376 --- .../xpack/ml/integration/PyTorchModelIT.java | 1 - .../process/PyTorchResultProcessor.java | 65 ++++++++++---- .../inference/pytorch/results/AckResult.java | 37 ++++++++ .../pytorch/results/ErrorResult.java | 8 +- .../results/PyTorchInferenceResult.java | 38 +------- .../pytorch/results/PyTorchResult.java | 35 +++++++- .../pytorch/results/ThreadSettings.java | 8 +- .../inference/nlp/FillMaskProcessorTests.java | 4 +- .../ml/inference/nlp/NerProcessorTests.java | 10 +-- .../nlp/QuestionAnsweringProcessorTests.java | 2 +- .../nlp/TextClassificationProcessorTests.java | 4 +- .../nlp/TextSimilarityProcessorTests.java | 4 +- .../process/PyTorchResultProcessorTests.java | 89 ++++++++++--------- .../pytorch/results/AckResultTests.java | 35 ++++++++ .../pytorch/results/ErrorResultTests.java | 2 +- .../results/PyTorchInferenceResultTests.java | 4 +- .../pytorch/results/PyTorchResultTests.java | 18 +++- .../pytorch/results/ThreadSettingsTests.java | 6 +- .../test/ml/3rd_party_deployment.yml | 2 - ...MLModelDeploymentFullClusterRestartIT.java | 1 - .../upgrades/MLModelDeploymentsUpgradeIT.java | 1 - 21 files changed, 236 insertions(+), 138 deletions(-) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResult.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResultTests.java diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index b965e75e91c5c..9a16b50d73235 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -75,7 +75,6 @@ * torch.jit.save(traced_model, "simplemodel.pt") * ## End Python */ -@ESRestTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/ml-cpp/pull/2376") public class PyTorchModelIT extends ESRestTestCase { private static final String BASIC_AUTH_VALUE_SUPER_USER = UsernamePasswordToken.basicAuthHeaderValue( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java index c430d2a873a6f..89bc976dbb60b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.ml.utils.Intervals; +import org.elasticsearch.xpack.ml.inference.pytorch.results.AckResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.ErrorResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchResult; @@ -105,10 +106,12 @@ public void process(PyTorchProcess process) { threadSettingsConsumer.accept(threadSettings); processThreadSettings(result); } + if (result.ackResult() != null) { + processAcknowledgement(result); + } if (result.errorResult() != null) { processErrorResult(result); } - } } catch (Exception e) { // No need to report error as we're stopping @@ -118,10 +121,13 @@ public void process(PyTorchProcess process) { pendingResults.forEach( (id, pendingResult) -> pendingResult.listener.onResponse( new PyTorchResult( + id, + null, + null, + null, null, null, new ErrorResult( - id, isStopping ? "inference canceled as process is stopping" : "inference native process died unexpectedly with failure [" + e.getMessage() + "]" @@ -133,7 +139,7 @@ public void process(PyTorchProcess process) { } finally { pendingResults.forEach( (id, pendingResult) -> pendingResult.listener.onResponse( - new PyTorchResult(null, null, new ErrorResult(id, "inference canceled as process is stopping")) + new PyTorchResult(id, false, null, null, null, null, new ErrorResult("inference canceled as process is stopping")) ) ); pendingResults.clear(); @@ -144,12 +150,17 @@ public void process(PyTorchProcess process) { void processInferenceResult(PyTorchResult result) { PyTorchInferenceResult inferenceResult = result.inferenceResult(); assert inferenceResult != null; + Long timeMs = result.timeMs(); + if (timeMs == null) { + assert false : "time_ms should be set for an inference result"; + timeMs = 0L; + } - logger.trace(() -> format("[%s] Parsed result with id [%s]", deploymentId, inferenceResult.getRequestId())); - processResult(inferenceResult); - PendingResult pendingResult = pendingResults.remove(inferenceResult.getRequestId()); + logger.trace(() -> format("[%s] Parsed inference result with id [%s]", deploymentId, result.requestId())); + processResult(inferenceResult, timeMs, Boolean.TRUE.equals(result.isCacheHit())); + PendingResult pendingResult = pendingResults.remove(result.requestId()); if (pendingResult == null) { - logger.debug(() -> format("[%s] no pending result for [%s]", deploymentId, inferenceResult.getRequestId())); + logger.debug(() -> format("[%s] no pending result for inference [%s]", deploymentId, result.requestId())); } else { pendingResult.listener.onResponse(result); } @@ -159,10 +170,23 @@ void processThreadSettings(PyTorchResult result) { ThreadSettings threadSettings = result.threadSettings(); assert threadSettings != null; - logger.trace(() -> format("[%s] Parsed result with id [%s]", deploymentId, threadSettings.requestId())); - PendingResult pendingResult = pendingResults.remove(threadSettings.requestId()); + logger.trace(() -> format("[%s] Parsed thread settings result with id [%s]", deploymentId, result.requestId())); + PendingResult pendingResult = pendingResults.remove(result.requestId()); + if (pendingResult == null) { + logger.debug(() -> format("[%s] no pending result for thread settings [%s]", deploymentId, result.requestId())); + } else { + pendingResult.listener.onResponse(result); + } + } + + void processAcknowledgement(PyTorchResult result) { + AckResult ack = result.ackResult(); + assert 
ack != null; + + logger.trace(() -> format("[%s] Parsed ack result with id [%s]", deploymentId, result.requestId())); + PendingResult pendingResult = pendingResults.remove(result.requestId()); if (pendingResult == null) { - logger.debug(() -> format("[%s] no pending result for [%s]", deploymentId, threadSettings.requestId())); + logger.debug(() -> format("[%s] no pending result for ack [%s]", deploymentId, result.requestId())); } else { pendingResult.listener.onResponse(result); } @@ -172,12 +196,15 @@ void processErrorResult(PyTorchResult result) { ErrorResult errorResult = result.errorResult(); assert errorResult != null; - errorCount++; + // Only one result is processed at any time, but we need to stop this happening part way through another thread getting stats + synchronized (this) { + errorCount++; + } - logger.trace(() -> format("[%s] Parsed error with id [%s]", deploymentId, errorResult.requestId())); - PendingResult pendingResult = pendingResults.remove(errorResult.requestId()); + logger.trace(() -> format("[%s] Parsed error with id [%s]", deploymentId, result.requestId())); + PendingResult pendingResult = pendingResults.remove(result.requestId()); if (pendingResult == null) { - logger.debug(() -> format("[%s] no pending result for [%s]", deploymentId, errorResult.requestId())); + logger.debug(() -> format("[%s] no pending result for error [%s]", deploymentId, result.requestId())); } else { pendingResult.listener.onResponse(result); } @@ -218,8 +245,8 @@ public synchronized ResultStats getResultStats() { ); } - private synchronized void processResult(PyTorchInferenceResult result) { - timingStats.accept(result.getTimeMs()); + private synchronized void processResult(PyTorchInferenceResult result, long timeMs, boolean isCacheHit) { + timingStats.accept(timeMs); lastResultTimeMs = currentTimeMsSupplier.getAsLong(); if (lastResultTimeMs > currentPeriodEndTimeMs) { @@ -240,15 +267,15 @@ private synchronized void processResult(PyTorchInferenceResult result) { lastPeriodCacheHitCount = 0; lastPeriodSummaryStats = new LongSummaryStatistics(); - lastPeriodSummaryStats.accept(result.getTimeMs()); + lastPeriodSummaryStats.accept(timeMs); // set to the end of the current bucket currentPeriodEndTimeMs = startTime + Intervals.alignToCeil(lastResultTimeMs - startTime, REPORTING_PERIOD_MS); } else { - lastPeriodSummaryStats.accept(result.getTimeMs()); + lastPeriodSummaryStats.accept(timeMs); } - if (result.isCacheHit()) { + if (isCacheHit) { cacheHitCount++; lastPeriodCacheHitCount++; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResult.java new file mode 100644 index 0000000000000..06f2679c56c2d --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResult.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.pytorch.results; + +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +public record AckResult(boolean acknowledged) implements ToXContentObject { + + public static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); + + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "ack", + a -> new AckResult((Boolean) a[0]) + ); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ACKNOWLEDGED); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACKNOWLEDGED.getPreferredName(), acknowledged); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResult.java index 20e0855a50b3e..68fc5cc589231 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResult.java @@ -14,26 +14,22 @@ import java.io.IOException; -public record ErrorResult(String requestId, String error) implements ToXContentObject { +public record ErrorResult(String error) implements ToXContentObject { public static final ParseField ERROR = new ParseField("error"); public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "error", - a -> new ErrorResult((String) a[0], (String) a[1]) + a -> new ErrorResult((String) a[0]) ); static { - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PyTorchResult.REQUEST_ID); PARSER.declareString(ConstructingObjectParser.constructorArg(), ERROR); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (requestId != null) { - builder.field(PyTorchResult.REQUEST_ID.getPreferredName(), requestId); - } builder.field(ERROR.getPreferredName(), error); builder.endObject(); return builder; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResult.java index c4636f3110f4a..a1482851fc21d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResult.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Objects; /** * All results must have a request_id. 
@@ -28,62 +27,38 @@ public class PyTorchInferenceResult implements ToXContentObject { private static final ParseField INFERENCE = new ParseField("inference"); - private static final ParseField TIME_MS = new ParseField("time_ms"); - private static final ParseField CACHE_HIT = new ParseField("cache_hit"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "pytorch_inference_result", - a -> new PyTorchInferenceResult((String) a[0], (double[][][]) a[1], (Long) a[2], (Boolean) a[3]) + a -> new PyTorchInferenceResult((double[][][]) a[0]) ); static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), PyTorchResult.REQUEST_ID); PARSER.declareField( ConstructingObjectParser.optionalConstructorArg(), (p, c) -> MlParserUtils.parse3DArrayOfDoubles(INFERENCE.getPreferredName(), p), INFERENCE, ObjectParser.ValueType.VALUE_ARRAY ); - PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_MS); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), CACHE_HIT); } public static PyTorchInferenceResult fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } - private final String requestId; private final double[][][] inference; - private final long timeMs; - private final boolean cacheHit; - public PyTorchInferenceResult(String requestId, @Nullable double[][][] inference, long timeMs, boolean cacheHit) { - this.requestId = Objects.requireNonNull(requestId); + public PyTorchInferenceResult(@Nullable double[][][] inference) { this.inference = inference; - this.timeMs = timeMs; - this.cacheHit = cacheHit; - } - - public String getRequestId() { - return requestId; } public double[][][] getInferenceResult() { return inference; } - public long getTimeMs() { - return timeMs; - } - - public boolean isCacheHit() { - return cacheHit; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(PyTorchResult.REQUEST_ID.getPreferredName(), requestId); if (inference != null) { builder.startArray(INFERENCE.getPreferredName()); for (double[][] doubles : inference) { @@ -95,15 +70,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); } - builder.field(TIME_MS.getPreferredName(), timeMs); - builder.field(CACHE_HIT.getPreferredName(), cacheHit); builder.endObject(); return builder; } @Override public int hashCode() { - return Objects.hash(requestId, timeMs, Arrays.deepHashCode(inference), cacheHit); + return Arrays.deepHashCode(inference); } @Override @@ -112,9 +85,6 @@ public boolean equals(Object other) { if (other == null || getClass() != other.getClass()) return false; PyTorchInferenceResult that = (PyTorchInferenceResult) other; - return Objects.equals(requestId, that.requestId) - && Arrays.deepEquals(inference, that.inference) - && timeMs == that.timeMs - && cacheHit == that.cacheHit; + return Arrays.deepEquals(inference, that.inference); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResult.java index f27037e38617d..11340d0bf542d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResult.java @@ -19,24 +19,43 @@ * The top level object capturing 
output from the pytorch process. */ public record PyTorchResult( + String requestId, + Boolean isCacheHit, + Long timeMs, @Nullable PyTorchInferenceResult inferenceResult, @Nullable ThreadSettings threadSettings, + @Nullable AckResult ackResult, @Nullable ErrorResult errorResult ) implements ToXContentObject { - static final ParseField REQUEST_ID = new ParseField("request_id"); + private static final ParseField REQUEST_ID = new ParseField("request_id"); + private static final ParseField CACHE_HIT = new ParseField("cache_hit"); + private static final ParseField TIME_MS = new ParseField("time_ms"); private static final ParseField RESULT = new ParseField("result"); private static final ParseField THREAD_SETTINGS = new ParseField("thread_settings"); + private static final ParseField ACK = new ParseField("ack"); public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "pytorch_result", - a -> new PyTorchResult((PyTorchInferenceResult) a[0], (ThreadSettings) a[1], (ErrorResult) a[2]) + a -> new PyTorchResult( + (String) a[0], + (Boolean) a[1], + (Long) a[2], + (PyTorchInferenceResult) a[3], + (ThreadSettings) a[4], + (AckResult) a[5], + (ErrorResult) a[6] + ) ); static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), REQUEST_ID); + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), CACHE_HIT); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), TIME_MS); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), PyTorchInferenceResult.PARSER, RESULT); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ThreadSettings.PARSER, THREAD_SETTINGS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), AckResult.PARSER, ACK); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ErrorResult.PARSER, ErrorResult.ERROR); } @@ -47,12 +66,24 @@ public boolean isError() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + if (requestId != null) { + builder.field(REQUEST_ID.getPreferredName(), requestId); + } + if (isCacheHit != null) { + builder.field(CACHE_HIT.getPreferredName(), isCacheHit); + } + if (timeMs != null) { + builder.field(TIME_MS.getPreferredName(), timeMs); + } if (inferenceResult != null) { builder.field(RESULT.getPreferredName(), inferenceResult); } if (threadSettings != null) { builder.field(THREAD_SETTINGS.getPreferredName(), threadSettings); } + if (ackResult != null) { + builder.field(ACK.getPreferredName(), ackResult); + } if (errorResult != null) { builder.field(ErrorResult.ERROR.getPreferredName(), errorResult); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettings.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettings.java index 3d2ad6997545d..9154d33c04574 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettings.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettings.java @@ -14,20 +14,19 @@ import java.io.IOException; -public record ThreadSettings(int numThreadsPerAllocation, int numAllocations, String requestId) implements ToXContentObject { +public record ThreadSettings(int numThreadsPerAllocation, int numAllocations) implements ToXContentObject { private static final ParseField NUM_ALLOCATIONS = new ParseField("num_allocations"); 
private static final ParseField NUM_THREADS_PER_ALLOCATION = new ParseField("num_threads_per_allocation"); public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "thread_settings", - a -> new ThreadSettings((int) a[0], (int) a[1], (String) a[2]) + a -> new ThreadSettings((int) a[0], (int) a[1]) ); static { PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUM_THREADS_PER_ALLOCATION); PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUM_ALLOCATIONS); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PyTorchResult.REQUEST_ID); } @Override @@ -35,9 +34,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NUM_THREADS_PER_ALLOCATION.getPreferredName(), numThreadsPerAllocation); builder.field(NUM_ALLOCATIONS.getPreferredName(), numAllocations); - if (requestId != null) { - builder.field(PyTorchResult.REQUEST_ID.getPreferredName(), requestId); - } builder.endObject(); return builder; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java index b36ce41c5c49d..f3afb0286f076 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/FillMaskProcessorTests.java @@ -64,7 +64,7 @@ public void testProcessResults() { String resultsField = randomAlphaOfLength(10); FillMaskResults result = (FillMaskResults) FillMaskProcessor.processResult( tokenization, - new PyTorchInferenceResult("1", scores, 0L, false), + new PyTorchInferenceResult(scores), tokenizer, 4, resultsField @@ -91,7 +91,7 @@ public void testProcessResults_GivenMissingTokens() { 0 ); - PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult("1", new double[][][] { { {} } }, 0L, false); + PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult(new double[][][] { { {} } }); expectThrows( ElasticsearchStatusException.class, () -> FillMaskProcessor.processResult(tokenization, pyTorchResult, tokenizer, 5, randomAlphaOfLength(10)) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessorTests.java index 416beaee9d3db..389a4fab802a0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessorTests.java @@ -72,7 +72,7 @@ public void testProcessResults_GivenNoTokens() { var e = expectThrows( ElasticsearchStatusException.class, - () -> processor.processResult(tokenization, new PyTorchInferenceResult("test", null, 0L, false)) + () -> processor.processResult(tokenization, new PyTorchInferenceResult(null)) ); assertThat(e, instanceOf(ElasticsearchStatusException.class)); } @@ -113,7 +113,7 @@ public void testProcessResultsWithSpecialTokens() { { 0, 0, 0, 0, 0, 0, 0, 6, 0 }, // london { 7, 0, 0, 0, 0, 0, 0, 0, 0 } // sep } }; - NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult("1", scores, 1L, false)); + NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult(scores)); assertThat(result.getAnnotatedResult(), equalTo("Many use 
[Elasticsearch](ORG&Elasticsearch) in [London](LOC&London)")); assertThat(result.getEntityGroups().size(), equalTo(2)); @@ -141,7 +141,7 @@ public void testProcessResults() { { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // in { 0, 0, 0, 0, 0, 0, 0, 6, 0 } // london } }; - NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult("1", scores, 1L, false)); + NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult(scores)); assertThat(result.getAnnotatedResult(), equalTo("Many use [Elasticsearch](ORG&Elasticsearch) in [London](LOC&London)")); assertThat(result.getEntityGroups().size(), equalTo(2)); @@ -178,7 +178,7 @@ public void testProcessResults_withIobMap() { { 0, 0, 0, 0, 0, 0, 0, 0, 5 }, // in { 6, 0, 0, 0, 0, 0, 0, 0, 0 } // london } }; - NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult("1", scores, 1L, false)); + NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult(scores)); assertThat(result.getAnnotatedResult(), equalTo("[Elasticsearch](ORG&Elasticsearch) in [London](LOC&London)")); assertThat(result.getEntityGroups().size(), equalTo(2)); @@ -211,7 +211,7 @@ public void testProcessResults_withCustomIobMap() { { 0, 0, 0, 0, 5 }, // in { 6, 0, 0, 0, 0 } // london } }; - NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult("1", scores, 1L, false)); + NerResults result = (NerResults) processor.processResult(tokenization, new PyTorchInferenceResult(scores)); assertThat(result.getAnnotatedResult(), equalTo("[Elasticsearch](SOFTWARE&Elasticsearch) in [London](LOC&London)")); assertThat(result.getEntityGroups().size(), equalTo(2)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java index f988da404bdb3..ab8bdf4870973 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/QuestionAnsweringProcessorTests.java @@ -87,7 +87,7 @@ public void testProcessor() throws IOException { assertThat(tokenizationResult.getTokenization(0).seqPairOffset(), equalTo(7)); double[][][] scores = { { START_TOKEN_SCORES }, { END_TOKEN_SCORES } }; NlpTask.ResultProcessor resultProcessor = processor.getResultProcessor(config); - PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult("1", scores, 1L, false); + PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult(scores); QuestionAnsweringInferenceResults result = (QuestionAnsweringInferenceResults) resultProcessor.processResult( tokenizationResult, pyTorchResult diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextClassificationProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextClassificationProcessorTests.java index 357a0bd1bd611..3b48e75846243 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextClassificationProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextClassificationProcessorTests.java @@ -32,7 +32,7 @@ public class TextClassificationProcessorTests extends ESTestCase { public void testInvalidResult() { { - PyTorchInferenceResult torchResult = new 
PyTorchInferenceResult("foo", new double[][][] {}, 0L, false); + PyTorchInferenceResult torchResult = new PyTorchInferenceResult(new double[][][] {}); var e = expectThrows( ElasticsearchStatusException.class, () -> TextClassificationProcessor.processResult(null, torchResult, randomInt(), List.of("a", "b"), randomAlphaOfLength(10)) @@ -41,7 +41,7 @@ public void testInvalidResult() { assertThat(e.getMessage(), containsString("Text classification result has no data")); } { - PyTorchInferenceResult torchResult = new PyTorchInferenceResult("foo", new double[][][] { { { 1.0 } } }, 0L, false); + PyTorchInferenceResult torchResult = new PyTorchInferenceResult(new double[][][] { { { 1.0 } } }); var e = expectThrows( ElasticsearchStatusException.class, () -> TextClassificationProcessor.processResult(null, torchResult, randomInt(), List.of("a", "b"), randomAlphaOfLength(10)) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java index 5601fd6b8baa8..10be6225163b6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java @@ -51,7 +51,7 @@ public void testProcessor() throws IOException { assertThat(tokenizationResult.getTokenization(0).seqPairOffset(), equalTo(7)); double[][][] scores = { { { 42 } } }; NlpTask.ResultProcessor resultProcessor = processor.getResultProcessor(textSimilarityConfig); - PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult("1", scores, 1L, false); + PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult(scores); TextSimilarityInferenceResults result = (TextSimilarityInferenceResults) resultProcessor.processResult( tokenizationResult, pyTorchResult @@ -74,7 +74,7 @@ public void testResultFunctions() { TextSimilarityProcessor processor = new TextSimilarityProcessor(tokenizer); NlpTask.ResultProcessor resultProcessor = processor.getResultProcessor(textSimilarityConfig); double[][][] scores = { { { 42 }, { 12 }, { 100 } } }; - PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult("1", scores, 1L, false); + PyTorchInferenceResult pyTorchResult = new PyTorchInferenceResult(scores); TextSimilarityInferenceResults result = (TextSimilarityInferenceResults) resultProcessor.processResult( new BertTokenizationResult(List.of(), List.of(), 1), pyTorchResult diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java index 11eb75ac91bdf..98da8da4b686a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ml.inference.pytorch.results.AckResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.ErrorResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchResult; @@ -24,7 +25,6 @@ import static 
org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchResultProcessor.REPORTING_PERIOD_MS; import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.comparesEqualTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -37,40 +37,47 @@ public void testsThreadSettings() { var settingsHolder = new AtomicReference(); var processor = new PyTorchResultProcessor("deployment-foo", settingsHolder::set); - var settings = new ThreadSettings(1, 1, "thread-setting"); + var settings = new ThreadSettings(1, 1); processor.registerRequest("thread-setting", new AssertingResultListener(r -> assertEquals(settings, r.threadSettings()))); - processor.process(mockNativeProcess(List.of(new PyTorchResult(null, settings, null)).iterator())); + processor.process( + mockNativeProcess(List.of(new PyTorchResult("thread-setting", null, null, null, settings, null, null)).iterator()) + ); assertEquals(settings, settingsHolder.get()); } public void testResultsProcessing() { - var inferenceResult = new PyTorchInferenceResult("a", null, 1000L, false); - var threadSettings = new ThreadSettings(1, 1, "b"); - var errorResult = new ErrorResult("c", "a bad thing has happened"); + var inferenceResult = new PyTorchInferenceResult(null); + var threadSettings = new ThreadSettings(1, 1); + var ack = new AckResult(true); + var errorResult = new ErrorResult("a bad thing has happened"); var inferenceListener = new AssertingResultListener(r -> assertEquals(inferenceResult, r.inferenceResult())); var threadSettingsListener = new AssertingResultListener(r -> assertEquals(threadSettings, r.threadSettings())); + var ackListener = new AssertingResultListener(r -> assertEquals(ack, r.ackResult())); var errorListener = new AssertingResultListener(r -> assertEquals(errorResult, r.errorResult())); var processor = new PyTorchResultProcessor("foo", s -> {}); processor.registerRequest("a", inferenceListener); processor.registerRequest("b", threadSettingsListener); - processor.registerRequest("c", errorListener); + processor.registerRequest("c", ackListener); + processor.registerRequest("d", errorListener); processor.process( mockNativeProcess( List.of( - new PyTorchResult(inferenceResult, null, null), - new PyTorchResult(null, threadSettings, null), - new PyTorchResult(null, null, errorResult) + new PyTorchResult("a", true, 1000L, inferenceResult, null, null, null), + new PyTorchResult("b", null, null, null, threadSettings, null, null), + new PyTorchResult("c", null, null, null, null, ack, null), + new PyTorchResult("d", null, null, null, null, null, errorResult) ).iterator() ) ); assertTrue(inferenceListener.hasResponse); assertTrue(threadSettingsListener.hasResponse); + assertTrue(ackListener.hasResponse); assertTrue(errorListener.hasResponse); } @@ -86,9 +93,9 @@ public void testPendingRequest() { ); processor.registerRequest("b", calledOnShutdown); - var inferenceResult = new PyTorchInferenceResult("a", null, 1000L, false); + var inferenceResult = new PyTorchInferenceResult(null); - processor.process(mockNativeProcess(List.of(new PyTorchResult(inferenceResult, null, null)).iterator())); + processor.process(mockNativeProcess(List.of(new PyTorchResult("a", false, 1000L, inferenceResult, null, null, null)).iterator())); assertSame(inferenceResult, resultHolder.get()); assertTrue(calledOnShutdown.hasResponse); } @@ -100,8 +107,8 @@ public void testCancelPendingRequest() { processor.ignoreResponseWithoutNotifying("a"); - var 
inferenceResult = new PyTorchInferenceResult("a", null, 1000L, false); - processor.process(mockNativeProcess(List.of(new PyTorchResult(inferenceResult, null, null)).iterator())); + var inferenceResult = new PyTorchInferenceResult(null); + processor.process(mockNativeProcess(List.of(new PyTorchResult("a", false, 1000L, inferenceResult, null, null, null)).iterator())); } public void testPendingRequestAreCalledAtShutdown() { @@ -146,8 +153,8 @@ public void onFailure(Exception e) { } } - private PyTorchResult wrapInferenceResult(PyTorchInferenceResult result) { - return new PyTorchResult(result, null, null); + private PyTorchResult wrapInferenceResult(String requestId, boolean isCacheHit, long timeMs, PyTorchInferenceResult result) { + return new PyTorchResult(requestId, isCacheHit, timeMs, result, null, null, null); } public void testsStats() { @@ -161,33 +168,33 @@ public void testsStats() { processor.registerRequest("b", pendingB); processor.registerRequest("c", pendingC); - var a = wrapInferenceResult(new PyTorchInferenceResult("a", null, 1000L, false)); - var b = wrapInferenceResult(new PyTorchInferenceResult("b", null, 900L, false)); - var c = wrapInferenceResult(new PyTorchInferenceResult("c", null, 200L, true)); + var a = wrapInferenceResult("a", false, 1000L, new PyTorchInferenceResult(null)); + var b = wrapInferenceResult("b", false, 900L, new PyTorchInferenceResult(null)); + var c = wrapInferenceResult("c", true, 200L, new PyTorchInferenceResult(null)); processor.processInferenceResult(a); var stats = processor.getResultStats(); - assertThat(stats.errorCount(), comparesEqualTo(0)); + assertThat(stats.errorCount(), equalTo(0)); assertThat(stats.cacheHitCount(), equalTo(0L)); - assertThat(stats.numberOfPendingResults(), comparesEqualTo(2)); - assertThat(stats.timingStats().getCount(), comparesEqualTo(1L)); - assertThat(stats.timingStats().getSum(), comparesEqualTo(1000L)); + assertThat(stats.numberOfPendingResults(), equalTo(2)); + assertThat(stats.timingStats().getCount(), equalTo(1L)); + assertThat(stats.timingStats().getSum(), equalTo(1000L)); processor.processInferenceResult(b); stats = processor.getResultStats(); - assertThat(stats.errorCount(), comparesEqualTo(0)); + assertThat(stats.errorCount(), equalTo(0)); assertThat(stats.cacheHitCount(), equalTo(0L)); - assertThat(stats.numberOfPendingResults(), comparesEqualTo(1)); - assertThat(stats.timingStats().getCount(), comparesEqualTo(2L)); - assertThat(stats.timingStats().getSum(), comparesEqualTo(1900L)); + assertThat(stats.numberOfPendingResults(), equalTo(1)); + assertThat(stats.timingStats().getCount(), equalTo(2L)); + assertThat(stats.timingStats().getSum(), equalTo(1900L)); processor.processInferenceResult(c); stats = processor.getResultStats(); - assertThat(stats.errorCount(), comparesEqualTo(0)); + assertThat(stats.errorCount(), equalTo(0)); assertThat(stats.cacheHitCount(), equalTo(1L)); - assertThat(stats.numberOfPendingResults(), comparesEqualTo(0)); - assertThat(stats.timingStats().getCount(), comparesEqualTo(3L)); - assertThat(stats.timingStats().getSum(), comparesEqualTo(2100L)); + assertThat(stats.numberOfPendingResults(), equalTo(0)); + assertThat(stats.timingStats().getCount(), equalTo(3L)); + assertThat(stats.timingStats().getSum(), equalTo(2100L)); } public void testsTimeDependentStats() { @@ -227,9 +234,9 @@ public void testsTimeDependentStats() { var processor = new PyTorchResultProcessor("foo", s -> {}, timeSupplier); // 1st period - processor.processInferenceResult(wrapInferenceResult(new 
PyTorchInferenceResult("foo", null, 200L, false))); - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 200L, false))); - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 200L, false))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 200L, new PyTorchInferenceResult(null))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 200L, new PyTorchInferenceResult(null))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 200L, new PyTorchInferenceResult(null))); // first call has no results as is in the same period var stats = processor.getResultStats(); assertThat(stats.recentStats().requestsProcessed(), equalTo(0L)); @@ -243,7 +250,7 @@ public void testsTimeDependentStats() { assertThat(stats.peakThroughput(), equalTo(3L)); // 2nd period - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 100L, false))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 100L, new PyTorchInferenceResult(null))); stats = processor.getResultStats(); assertNotNull(stats.recentStats()); assertThat(stats.recentStats().requestsProcessed(), equalTo(1L)); @@ -255,7 +262,7 @@ public void testsTimeDependentStats() { assertThat(stats.recentStats().requestsProcessed(), equalTo(0L)); // 4th period - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 300L, false))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 300L, new PyTorchInferenceResult(null))); stats = processor.getResultStats(); assertNotNull(stats.recentStats()); assertThat(stats.recentStats().requestsProcessed(), equalTo(1L)); @@ -263,8 +270,8 @@ public void testsTimeDependentStats() { assertThat(stats.lastUsed(), equalTo(Instant.ofEpochMilli(resultTimestamps[9]))); // 7th period - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 410L, false))); - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 390L, false))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 410L, new PyTorchInferenceResult(null))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 390L, new PyTorchInferenceResult(null))); stats = processor.getResultStats(); assertThat(stats.recentStats().requestsProcessed(), equalTo(0L)); assertThat(stats.recentStats().avgInferenceTime(), nullValue()); @@ -275,9 +282,9 @@ public void testsTimeDependentStats() { assertThat(stats.lastUsed(), equalTo(Instant.ofEpochMilli(resultTimestamps[12]))); // 8th period - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 510L, false))); - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 500L, false))); - processor.processInferenceResult(wrapInferenceResult(new PyTorchInferenceResult("foo", null, 490L, false))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 510L, new PyTorchInferenceResult(null))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 500L, new PyTorchInferenceResult(null))); + processor.processInferenceResult(wrapInferenceResult("foo", false, 490L, new PyTorchInferenceResult(null))); stats = processor.getResultStats(); assertNotNull(stats.recentStats()); assertThat(stats.recentStats().requestsProcessed(), equalTo(3L)); diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResultTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResultTests.java new file mode 100644 index 0000000000000..b1b83e4d18851 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/AckResultTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.pytorch.results; + +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class AckResultTests extends AbstractXContentTestCase { + + public static AckResult createRandom() { + return new AckResult(randomBoolean()); + } + + @Override + protected AckResult createTestInstance() { + return createRandom(); + } + + @Override + protected AckResult doParseInstance(XContentParser parser) throws IOException { + return AckResult.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResultTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResultTests.java index 3c7dacd84afb4..ac197c898fdc7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResultTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResultTests.java @@ -15,7 +15,7 @@ public class ErrorResultTests extends AbstractXContentTestCase { public static ErrorResult createRandom() { - return new ErrorResult(randomBoolean() ? 
null : randomAlphaOfLength(5), randomAlphaOfLength(5)); + return new ErrorResult(randomAlphaOfLength(50)); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResultTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResultTests.java index 005271739dfc4..f7370f25e2e84 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResultTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchInferenceResultTests.java @@ -30,8 +30,6 @@ protected PyTorchInferenceResult createTestInstance() { } public static PyTorchInferenceResult createRandom() { - String id = randomAlphaOfLength(6); - int rows = randomIntBetween(1, 10); int columns = randomIntBetween(1, 10); int depth = randomIntBetween(1, 10); @@ -43,6 +41,6 @@ public static PyTorchInferenceResult createRandom() { } } } - return new PyTorchInferenceResult(id, arr, randomLong(), randomBoolean()); + return new PyTorchInferenceResult(arr); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResultTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResultTests.java index 9325dbb5d3ebe..9281fbfc54d13 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResultTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/PyTorchResultTests.java @@ -16,11 +16,21 @@ public class PyTorchResultTests extends AbstractXContentTestCase @Override protected PyTorchResult createTestInstance() { - int type = randomIntBetween(0, 2); + String requestId = randomAlphaOfLength(5); + int type = randomIntBetween(0, 3); return switch (type) { - case 0 -> new PyTorchResult(PyTorchInferenceResultTests.createRandom(), null, null); - case 1 -> new PyTorchResult(null, ThreadSettingsTests.createRandom(), null); - default -> new PyTorchResult(null, null, ErrorResultTests.createRandom()); + case 0 -> new PyTorchResult( + requestId, + randomBoolean(), + randomNonNegativeLong(), + PyTorchInferenceResultTests.createRandom(), + null, + null, + null + ); + case 1 -> new PyTorchResult(requestId, null, null, null, ThreadSettingsTests.createRandom(), null, null); + case 2 -> new PyTorchResult(requestId, null, null, null, null, AckResultTests.createRandom(), null); + default -> new PyTorchResult(requestId, null, null, null, null, null, ErrorResultTests.createRandom()); }; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettingsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettingsTests.java index 62ec2a4da27f9..ce3b9d9fa07f3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettingsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ThreadSettingsTests.java @@ -15,11 +15,7 @@ public class ThreadSettingsTests extends AbstractXContentTestCase { public static ThreadSettings createRandom() { - return new ThreadSettings( - randomIntBetween(1, Integer.MAX_VALUE), - randomIntBetween(1, Integer.MAX_VALUE), - randomBoolean() ? 
null : randomAlphaOfLength(5) - ); + return new ThreadSettings(randomIntBetween(1, Integer.MAX_VALUE), randomIntBetween(1, Integer.MAX_VALUE)); } @Override diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index bc4a36cef9ddd..6d0348b1fba92 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -76,8 +76,6 @@ setup: --- "Test start and stop deployment with cache": - skip: - version: all - reason: "@AwaitsFix https://github.com/elastic/ml-cpp/pull/2376" features: allowed_warnings - do: diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index b0e624b470d0b..f1c7c04905bea 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -31,7 +31,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -@AbstractFullClusterRestartTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/ml-cpp/pull/2376") public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java index 8109ce0f7d0f3..682875ae5a2e5 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java @@ -29,7 +29,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -@AbstractUpgradeTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/ml-cpp/pull/2376") public class MLModelDeploymentsUpgradeIT extends AbstractUpgradeTestCase { // See PyTorchModelIT for how this model was created From 21eb984e645bf7b5f71cc85ec6545f1d83bf9d6c Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Wed, 3 Aug 2022 15:19:01 -0400 Subject: [PATCH 090/265] Deprecate the _knn_search endpoint (#88828) This change deprecates the kNN search API in favor of the new 'knn' option inside the search API. The 'knn' option is now the preferred way of performing kNN search. 
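As a sketch of the now-preferred usage (the index name, field and vector values below are illustrative only, not taken from this change), the same kNN query goes through the search API's top-level `knn` option:

```console
GET my-index/_search
{
  "knn": {
    "field": "vector",
    "query_vector": [-0.5, 90.0, -10, 14.8, -156.0],
    "k": 5,
    "num_candidates": 50
  },
  "fields": [ "name" ]
}
```

Existing callers of `_knn_search` can migrate by moving the same `knn` body into `_search`; the deprecated endpoint keeps working against the v8 REST API but now emits a deprecation warning, as exercised by the updated YAML tests below.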
Relates to #87625 --- docs/changelog/88828.yaml | 13 +++++++ docs/reference/search.asciidoc | 1 - docs/reference/search/knn-search.asciidoc | 3 +- .../test/search.vectors/40_knn_search.yml | 9 +++++ .../50_dense_vector_field_usage.yml | 9 +---- .../action/search/RestKnnSearchAction.java | 8 +++- .../search/RestKnnSearchActionTests.java | 39 +++++++++++++++++++ 7 files changed, 72 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/88828.yaml create mode 100644 server/src/test/java/org/elasticsearch/rest/action/search/RestKnnSearchActionTests.java diff --git a/docs/changelog/88828.yaml b/docs/changelog/88828.yaml new file mode 100644 index 0000000000000..9880ceffdd305 --- /dev/null +++ b/docs/changelog/88828.yaml @@ -0,0 +1,13 @@ +pr: 88828 +summary: Deprecate the `_knn_search` endpoint +area: Vector Search +type: deprecation +issues: [] +deprecation: + title: Deprecate the `_knn_search` endpoint + area: REST API + details: -| + The kNN search API is deprecated in favor of the new 'knn' option + inside the search API. The 'knn' option is now the recommended way of running + ANN search. + impact: Users should switch from `_knn_search` to the search `knn` option. diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 4dad5ec48c9dc..3a147e6ad9bc0 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -15,7 +15,6 @@ exception of the <>. * <> * <> * <> -* <> * <> * <> * <> diff --git a/docs/reference/search/knn-search.asciidoc b/docs/reference/search/knn-search.asciidoc index a4b1f9c00b3be..e37fec4612ce0 100644 --- a/docs/reference/search/knn-search.asciidoc +++ b/docs/reference/search/knn-search.asciidoc @@ -4,8 +4,8 @@ kNN search ++++ +deprecated::[8.4.0,"The kNN search API has been replaced by the <<<> in the search API."] experimental::[] - Performs a k-nearest neighbor (kNN) search and returns the matching documents. //// @@ -46,6 +46,7 @@ GET my-index/_knn_search } ---- // TEST[continued] +// TEST[warning:The kNN search API has been replaced by the `knn` option in the search API.] [[knn-search-api-request]] ==== {api-request-title} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index f2c5d635c1cc6..0ffec61788a77 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -160,7 +160,11 @@ setup: --- "kNN search in _knn_search endpoint": + - skip: + features: ["allowed_warnings"] - do: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: index: test body: @@ -182,7 +186,10 @@ setup: - skip: version: ' - 8.1.99' reason: 'kNN with filtering added in 8.2' + features: ["allowed_warnings"] - do: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: index: test body: @@ -201,6 +208,8 @@ setup: - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} - do: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." 
knn_search: index: test body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml index c3ce4e7bd5f76..a8635d529b296 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml @@ -24,6 +24,7 @@ setup: - do: index: index: futest + id: "1" body: name: cow.jpg vector: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] @@ -53,7 +54,7 @@ setup: version: ' - 8.0.99' reason: 'dense_vector field usage was added in 8.1' - do: - knn_search: + search: index: futest body: fields: [ "name" ] @@ -63,12 +64,6 @@ setup: k: 2 num_candidates: 3 - - match: {hits.hits.0._id: "2"} - - match: {hits.hits.0.fields.name.0: "moose.jpg"} - - - match: {hits.hits.1._id: "3"} - - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} - - do: indices.field_usage_stats: { index: futest } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java index 30b25c8d4cfef..b697db7929ecf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestCancellableNodeClient; @@ -28,11 +29,16 @@ */ public class RestKnnSearchAction extends BaseRestHandler { + static final String DEPRECATION_MESSAGE = "The kNN search API has been replaced by the `knn` option in the search API."; + public RestKnnSearchAction() {} @Override public List routes() { - return List.of(new Route(GET, "{index}/_knn_search"), new Route(POST, "{index}/_knn_search")); + return List.of( + Route.builder(GET, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build(), + Route.builder(POST, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build() + ); } @Override diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestKnnSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestKnnSearchActionTests.java new file mode 100644 index 0000000000000..e57d54b8fdde9 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestKnnSearchActionTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.rest.action.search; + +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.junit.Before; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class RestKnnSearchActionTests extends RestActionTestCase { + private List contentTypeHeader; + private RestKnnSearchAction action; + + @Before + public void setUpAction() { + action = new RestKnnSearchAction(); + controller().registerHandler(action); + contentTypeHeader = Collections.singletonList(randomCompatibleMediaType(RestApiVersion.V_8)); + } + + public void testDeprecation() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withHeaders( + Map.of("Content-Type", contentTypeHeader, "Accept", contentTypeHeader) + ).withMethod(RestRequest.Method.GET).withPath("/some_index/_knn_search").build(); + + dispatchRequest(request); + assertCriticalWarnings(RestKnnSearchAction.DEPRECATION_MESSAGE); + } +} From 0bed7f768ad299f779e2dc39c27db7487f5609d0 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Wed, 3 Aug 2022 16:13:14 -0400 Subject: [PATCH 091/265] Fix failures in vector field usage mixed cluster test --- .../test/search.vectors/50_dense_vector_field_usage.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml index a8635d529b296..854543f7b2144 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml @@ -53,8 +53,11 @@ setup: - skip: version: ' - 8.0.99' reason: 'dense_vector field usage was added in 8.1' + features: ["allowed_warnings"] - do: - search: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." + knn_search: index: futest body: fields: [ "name" ] From fe011ed874a03c651e072c8c8fb546b36b3eec1f Mon Sep 17 00:00:00 2001 From: Justin Cranford <89857999+justincr-elastic@users.noreply.github.com> Date: Wed, 3 Aug 2022 16:26:13 -0400 Subject: [PATCH 092/265] Mention `_async_search` internal user which was added in 7.7 (#89050) Co-authored-by: Elastic Machine --- .../docs/en/security/authentication/internal-users.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x-pack/docs/en/security/authentication/internal-users.asciidoc b/x-pack/docs/en/security/authentication/internal-users.asciidoc index 91b5d70239495..86fc0254fed76 100644 --- a/x-pack/docs/en/security/authentication/internal-users.asciidoc +++ b/x-pack/docs/en/security/authentication/internal-users.asciidoc @@ -2,9 +2,9 @@ [[internal-users]] === Internal users -The {stack-security-features} use three _internal_ users (`_system`, `_xpack`, -and `_xpack_security`), which are responsible for the operations that take place -inside an {es} cluster. +The {stack-security-features} use four _internal_ users (`_system`, `_xpack`, +`_xpack_security`, and `_async_search`), which are responsible for the operations +that take place inside an {es} cluster. These users are only used by requests that originate from within the cluster. 
For this reason, they cannot be used to authenticate against the API and there From 4af70699587beddc01d5586f6d1a228e15053d7a Mon Sep 17 00:00:00 2001 From: Stef Nestor Date: Wed, 3 Aug 2022 14:31:20 -0600 Subject: [PATCH 093/265] Update ES.ILM.Action.ReadOnly (#89054) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Related to [Discuss#311070](https://discuss.elastic.co/t/action-readonly-appears-to-set-index-blocks-write-not-index-blocks-read-only/311070), @joegallo explains > The [ReadOnlyAction](https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java#L58-L65) is composed of a series of steps, the most important to this conversation being the [ReadOnlyStep](https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyStep.java#L42). That step does indeed add a write block (as opposed to a ‘read_only’) block, almost certainly the reasoning is that a ‘read_only’ block makes the index metadata read only, also, and we can’t have that — it would prevent the index from moving through the rest of the ILM process. E.g. can’t reassign tiers, can’t change replicas, can’t even change the currently assigned ilm phase/action/step, etc, if you can’t change the index’s metadata. So, the intention of ILM Action "Read Only" is to make an index's data read only and not also the index's metadata. This also decouples "read only" from understanding overlapping to `index.blocks.read_only` which appears to be an accidental thought overlap. --- docs/reference/ilm/actions/ilm-readonly.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/ilm/actions/ilm-readonly.asciidoc b/docs/reference/ilm/actions/ilm-readonly.asciidoc index 4ebf7669e44ae..47b4505bc4ec7 100644 --- a/docs/reference/ilm/actions/ilm-readonly.asciidoc +++ b/docs/reference/ilm/actions/ilm-readonly.asciidoc @@ -4,8 +4,8 @@ Phases allowed: hot, warm, cold. -Makes the index read-only; -writes and metadata changes are no longer allowed. +Makes the index data read-only; +disables data write operations against the index. To use the `readonly` action in the `hot` phase, the `rollover` action *must* be present. If no rollover action is configured, {ilm-init} will reject the policy. From 5da482b9deb8280092328d33cd1ebc9222f6fb1c Mon Sep 17 00:00:00 2001 From: Stef Nestor Date: Wed, 3 Aug 2022 14:32:15 -0600 Subject: [PATCH 094/265] ILM Frozen allows Unfollow Action (#88973) Updates [Phase Action](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-index-lifecycle.html#ilm-phase-actions) list to agree with [Unfollow](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-unfollow.html) page that Frozen tier accepts Unfollow action. Confirmed v8.3 ```diff PUT _ilm/policy/my_policy {"policy": {"phases": { "frozen": { "actions": { + "unfollow" : {}, "searchable_snapshot": { "snapshot_repository" : "found-snapshots"} } } } } } {"acknowledged": true } ``` --- docs/reference/ilm/ilm-index-lifecycle.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/ilm/ilm-index-lifecycle.asciidoc b/docs/reference/ilm/ilm-index-lifecycle.asciidoc index 753047604bbd9..034a38e91a580 100644 --- a/docs/reference/ilm/ilm-index-lifecycle.asciidoc +++ b/docs/reference/ilm/ilm-index-lifecycle.asciidoc @@ -106,6 +106,7 @@ actions in the order listed. 
- <> - <> * Frozen + - <> - <> * Delete - <> From f6cce702aeb4639e1bdb332a8c01819fd8a4d6ab Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 4 Aug 2022 08:35:32 +0200 Subject: [PATCH 095/265] Add test in vector tiles with runtime fields (#89044) --- .../xpack/vectortile/VectorTileRestIT.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/x-pack/plugin/vector-tile/src/javaRestTest/java/org/elasticsearch/xpack/vectortile/VectorTileRestIT.java b/x-pack/plugin/vector-tile/src/javaRestTest/java/org/elasticsearch/xpack/vectortile/VectorTileRestIT.java index fa3e7d77474d3..b33747ed08b7d 100644 --- a/x-pack/plugin/vector-tile/src/javaRestTest/java/org/elasticsearch/xpack/vectortile/VectorTileRestIT.java +++ b/x-pack/plugin/vector-tile/src/javaRestTest/java/org/elasticsearch/xpack/vectortile/VectorTileRestIT.java @@ -878,6 +878,19 @@ public void testOverlappingMultipolygon() throws Exception { assertThat(response.getStatusLine().getStatusCode(), Matchers.equalTo(HttpStatus.SC_OK)); } + public void testGetRuntimeField() throws Exception { + final Request mvtRequest = new Request(getHttpMethod(), INDEX_POINTS + "/_mvt/location_rf/" + z + "/" + x + "/" + y); + mvtRequest.setJsonEntity( + "{\"size\" : 100, \"runtime_mappings\": { \"location_rf\": {\"type\": \"geo_point\", \"script\": " + + "{ \"source\": \"emit(doc['location'].lat, doc['location'].lon)\" }}}}" + ); + final VectorTile.Tile tile = execute(mvtRequest); + assertThat(tile.getLayersCount(), Matchers.equalTo(3)); + assertLayer(tile, HITS_LAYER, 4096, 33, 2); + assertLayer(tile, AGGS_LAYER, 4096, 1, 2); + assertLayer(tile, META_LAYER, 4096, 1, 13); + } + private String getHttpMethod() { return random().nextBoolean() ? HttpGet.METHOD_NAME : HttpPost.METHOD_NAME; } From 68050e950273435fe96a66c49c56dfb24b3c47d2 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Thu, 4 Aug 2022 08:51:31 +0200 Subject: [PATCH 096/265] [ML] Optimize frequent items transaction lookup (#89062) represent transactions as bitsets for faster lookups when iterating over candidate sets. This PR implements a lookup table and a subset check based on bits. 
It uses this lookup table to map transactions to items, this so-called horizontal representation is used to speedup the lookup that checks if a transaction contains the candidate item set --- docs/changelog/89062.yaml | 5 ++ .../CountingItemSetTraverser.java | 58 +++++++------ .../aggs/frequentitemsets/ItemSetBitSet.java | 12 ++- .../frequentitemsets/TransactionStore.java | 44 ++++++++++ .../TransactionsLookupTable.java | 85 +++++++++++++++++++ .../frequentitemsets/ItemSetBitSetTests.java | 7 ++ .../TransactionLookupTableTests.java | 81 ++++++++++++++++++ 7 files changed, 262 insertions(+), 30 deletions(-) create mode 100644 docs/changelog/89062.yaml create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionsLookupTable.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionLookupTableTests.java diff --git a/docs/changelog/89062.yaml b/docs/changelog/89062.yaml new file mode 100644 index 0000000000000..654e9b41b9127 --- /dev/null +++ b/docs/changelog/89062.yaml @@ -0,0 +1,5 @@ +pr: 89062 +summary: Optimize frequent items transaction lookup +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java index 4d9de3a86e23c..3dd56d6246451 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java @@ -9,14 +9,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.BitSet; -import org.apache.lucene.util.FixedBitSet; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.TransactionStore.TopItemIds; import java.io.IOException; import java.util.Arrays; +import java.util.BitSet; /** * Item set traverser to find the next interesting item set. 
@@ -31,7 +30,7 @@ * that do not pass a previous step: * if [a, b] is not in T, [a, b, c] can not be in T either */ -class CountingItemSetTraverser implements Releasable { +final class CountingItemSetTraverser implements Releasable { private static final Logger logger = LogManager.getLogger(CountingItemSetTraverser.class); // start size and size increment for the occurences stack @@ -40,6 +39,8 @@ class CountingItemSetTraverser implements Releasable { private final TransactionStore transactionStore; private final ItemSetTraverser topItemSetTraverser; private final TransactionStore.TopTransactionIds topTransactionIds; + + private final TransactionsLookupTable transactionsLookupTable; private final int cacheTraversalDepth; private final int cacheNumberOfTransactions; @@ -49,7 +50,7 @@ class CountingItemSetTraverser implements Releasable { private long[] occurencesStack; // growable bit set from java util - private java.util.BitSet visited; + private BitSet visited; CountingItemSetTraverser( TransactionStore transactionStore, @@ -57,7 +58,7 @@ class CountingItemSetTraverser implements Releasable { int cacheTraversalDepth, int cacheNumberOfTransactions, long minCount - ) { + ) throws IOException { this.transactionStore = transactionStore; boolean success = false; @@ -65,6 +66,7 @@ class CountingItemSetTraverser implements Releasable { // we allocate 2 big arrays, if the 2nd allocation fails, ensure we clean up this.topItemSetTraverser = new ItemSetTraverser(topItemIds); this.topTransactionIds = transactionStore.getTopTransactionIds(); + this.transactionsLookupTable = transactionStore.createLookupTableByTopTransactions(topItemIds, topTransactionIds); success = true; } finally { if (false == success) { @@ -75,7 +77,7 @@ class CountingItemSetTraverser implements Releasable { this.cacheTraversalDepth = cacheTraversalDepth; this.cacheNumberOfTransactions = cacheNumberOfTransactions; transactionSkipCounts = new long[cacheTraversalDepth - 1]; - transactionSkipList = new FixedBitSet((cacheTraversalDepth - 1) * cacheNumberOfTransactions); + transactionSkipList = new BitSet((cacheTraversalDepth - 1) * cacheNumberOfTransactions); occurencesStack = new long[OCCURENCES_SIZE_INCREMENT]; visited = new java.util.BitSet(); } @@ -109,41 +111,41 @@ public boolean next(long earlyStopMinCount) throws IOException { // we recalculate the row for this depth, so we have to clear the bits first transactionSkipList.clear((depth - 1) * cacheNumberOfTransactions, ((depth) * cacheNumberOfTransactions)); - int transactionNumber = 0; - long occurences = 0; + int topTransactionPos = 0; + long occurrences = 0; - for (Long transactionId : topTransactionIds) { + // for whatever reason this turns out to be faster than a for loop + while (topTransactionPos < topTransactionIds.size()) { // caching: if the transaction is already marked for skipping, quickly continue - if (transactionNumber < cacheNumberOfTransactions - && transactionSkipList.get(cacheNumberOfTransactions * (depth - 2) + transactionNumber)) { + if (topTransactionPos < cacheNumberOfTransactions + && transactionSkipList.get(cacheNumberOfTransactions * (depth - 2) + topTransactionPos)) { // set the bit for the next iteration - transactionSkipList.set(cacheNumberOfTransactions * (depth - 1) + transactionNumber); - transactionNumber++; + transactionSkipList.set(cacheNumberOfTransactions * (depth - 1) + topTransactionPos); + topTransactionPos++; continue; } - long transactionCount = transactionStore.getTransactionCount(transactionId); + long transactionCount = 
transactionStore.getTransactionCount(topTransactionIds.getItemIdAt(topTransactionPos)); - if (transactionStore.transactionContainsAllIds(topItemSetTraverser.getItemSet(), transactionId)) { - occurences += transactionCount; - } else if (transactionNumber < cacheNumberOfTransactions) { + if (transactionsLookupTable.isSubsetOf(topTransactionPos, topItemSetTraverser.getItemSetBitSet())) { + occurrences += transactionCount; + } else if (topTransactionPos < cacheNumberOfTransactions) { // put this transaction to the skip list skipCount += transactionCount; - transactionSkipList.set(cacheNumberOfTransactions * (depth - 1) + transactionNumber); + transactionSkipList.set(cacheNumberOfTransactions * (depth - 1) + topTransactionPos); } maxReachableTransactionCount -= transactionCount; // exit early if min support given for early stop can't be reached - if (maxReachableTransactionCount + occurences < earlyStopMinCount) { + if (maxReachableTransactionCount + occurrences < earlyStopMinCount) { break; } - transactionNumber++; + topTransactionPos++; } - transactionSkipCounts[depth - 1] = skipCount; - rememberCountInStack(depth, occurences); + rememberCountInStack(depth, occurrences); return true; } @@ -158,7 +160,7 @@ public boolean next(long earlyStopMinCount) throws IOException { long maxReachableTransactionCount = totalTransactionCount - skipCount; int transactionNumber = 0; - long occurences = 0; + long occurrences = 0; for (Long transactionId : topTransactionIds) { // caching: if the transaction is already marked for skipping, quickly continue if (transactionNumber < cacheNumberOfTransactions @@ -169,21 +171,21 @@ public boolean next(long earlyStopMinCount) throws IOException { } long transactionCount = transactionStore.getTransactionCount(transactionId); - if (transactionStore.transactionContainsAllIds(topItemSetTraverser.getItemSet(), transactionId)) { - occurences += transactionCount; + if (transactionsLookupTable.isSubsetOf(transactionNumber, topItemSetTraverser.getItemSetBitSet())) { + occurrences += transactionCount; } maxReachableTransactionCount -= transactionCount; // exit early if min support given for early stop can't be reached - if (maxReachableTransactionCount + occurences < earlyStopMinCount) { + if (maxReachableTransactionCount + occurrences < earlyStopMinCount) { break; } transactionNumber++; } - rememberCountInStack(depth, occurences); + rememberCountInStack(depth, occurrences); return true; } @@ -268,7 +270,7 @@ public boolean atLeaf() { @Override public void close() { - Releasables.close(topTransactionIds); + Releasables.close(topTransactionIds, transactionsLookupTable); } // remember the count in the stack without tracking push and pop diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java index 9a87fad024101..a2006550011b6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSet.java @@ -32,8 +32,9 @@ class ItemSetBitSet implements Cloneable { /* Used to shift left or right for a partial word mask */ private static final long WORD_MASK = 0xffffffffffffffffL; - private long[] words; - private transient int wordsInUse = 0; + // allow direct access for transaction lookup table + long[] words; + transient int wordsInUse = 0; private int cardinality = 0; ItemSetBitSet() { @@ 
-90,6 +91,13 @@ void clear(int bitIndex) { recalculateWordsInUse(); } + public void clear() { + while (wordsInUse > 0) { + words[--wordsInUse] = 0; + } + cardinality = 0; + } + /** * Returns true if the specified {@code ItemBitSet} is a subset of this * set. diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java index 5a4b48dc1c53c..ea861dba5d7d3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStore.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.common.util.IntArray; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -138,6 +139,10 @@ public long size() { return sortedTransactions.size(); } + public long getItemIdAt(long index) { + return sortedTransactions.get(index); + } + @Override public void close() { Releasables.close(sortedTransactions); @@ -350,6 +355,45 @@ public TopItemIds getTopItemIds() { return getTopItemIds(getItems().size()); } + /** + * Create a lookup table (bit matrix) containing a so-called "horizontal" representation of transactions to item ids. + * + * A bit is set according to the position in topItems, if a transaction contains an item the bit is set. + * The lookup table rows correspond to the order in top transactions. + * + * @param topItems the top items + * @param topTransactions the top transactions + * @return a transaction lookup table + * @throws IOException + */ + public TransactionsLookupTable createLookupTableByTopTransactions(TopItemIds topItems, TopTransactionIds topTransactions) + throws IOException { + try (IntArray positions = bigArrays.newIntArray(topItems.size())) { + + // helper lookup table that maps an item id to the position in the top items vector + for (int i = 0; i < topItems.size(); ++i) { + positions.set(topItems.getItemIdAt(i), i); + } + + BytesRefArray transactions = getTransactions(); + TransactionsLookupTable lookupTable = new TransactionsLookupTable(transactions.size(), bigArrays); + ItemSetBitSet bitSet = new ItemSetBitSet(); + + for (Long id : topTransactions) { + bitSet.clear(); + transactions.get(id, scratchBytesRef); + scratchByteArrayStreamInput.reset(scratchBytesRef.bytes, scratchBytesRef.offset, scratchBytesRef.length); + + while (scratchByteArrayStreamInput.available() > 0) { + // flip the bit according to the position in top items + bitSet.set(1 + positions.get(scratchByteArrayStreamInput.readVLong())); + } + lookupTable.append(bitSet); + } + return lookupTable; + } + } + /** * Check if a transaction specified by id contains the item * diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionsLookupTable.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionsLookupTable.java new file mode 100644 index 0000000000000..35948682aa80c --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionsLookupTable.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.aggs.frequentitemsets; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * Lookup table to represent transactions as bit sets. + */ +public class TransactionsLookupTable implements Accountable, Releasable { + + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TransactionsLookupTable.class); + + private final BigArrays bigArrays; + private LongArray startOffsets; + private LongArray longs; + private long size; + + public TransactionsLookupTable(long capacity, BigArrays bigArrays) { + this.bigArrays = bigArrays; + boolean success = false; + try { + startOffsets = bigArrays.newLongArray(capacity + 1, false); + startOffsets.set(0, 0); + longs = bigArrays.newLongArray(capacity * 3, false); + success = true; + } finally { + if (false == success) { + close(); + } + } + size = 0; + } + + public void append(ItemSetBitSet itemSetBitSet) { + final long startOffset = startOffsets.get(size); + longs = bigArrays.grow(longs, startOffset + itemSetBitSet.wordsInUse); + for (int i = 0; i < itemSetBitSet.wordsInUse; ++i) { + longs.set(startOffset + i, itemSetBitSet.words[i]); + } + startOffsets = bigArrays.grow(startOffsets, size + 2); + startOffsets.set(size + 1, startOffset + itemSetBitSet.wordsInUse); + ++size; + } + + boolean isSubsetOf(long row, ItemSetBitSet set) { + final long startOffset = startOffsets.get(row); + final int wordsInUse = (int) (startOffsets.get(row + 1) - startOffset); + + if (set.wordsInUse > wordsInUse) { + return false; + } + + for (int i = set.wordsInUse - 1; i >= 0; i--) { + final long word = longs.get(startOffset + i); + if ((word & set.words[i]) != set.words[i]) return false; + } + + return true; + } + + public long size() { + return size; + } + + @Override + public void close() { + Releasables.close(longs, startOffsets); + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + startOffsets.ramBytesUsed() + longs.ramBytesUsed(); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java index b70775391f122..e40decb0883ca 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetBitSetTests.java @@ -171,6 +171,13 @@ public void testCardinality() { set.reset(set2); assertEquals(0, set.cardinality()); set.clear(999); + + set.set(54); + set.set(20); + assertEquals(2, set.cardinality()); + + set.clear(); + assertEquals(0, set.cardinality()); } public void testHashCode() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionLookupTableTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionLookupTableTests.java new file mode 100644 index 0000000000000..d7720e18fb6da --- /dev/null +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionLookupTableTests.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.aggs.frequentitemsets; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; + +public class TransactionLookupTableTests extends ESTestCase { + + static BigArrays mockBigArrays() { + return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + public void testBasic() { + try (TransactionsLookupTable transactions = new TransactionsLookupTable(10, mockBigArrays())) { + + // setup 3 transactions + ItemSetBitSet set = new ItemSetBitSet(); + set.set(0); + set.set(3); + set.set(200); + set.set(5); + set.set(65); + transactions.append(set); + set.clear(); + set.set(2); + set.set(33); + set.set(44); + transactions.append(set); + assertEquals(2, transactions.size()); + set.clear(); + set.set(3); + set.set(5); + set.set(65); + set.set(99); + transactions.append(set); + assertEquals(3, transactions.size()); + + // lookup + set.clear(); + set.set(3); + set.set(65); + assertTrue(transactions.isSubsetOf(0, set)); + assertFalse(transactions.isSubsetOf(1, set)); + assertTrue(transactions.isSubsetOf(2, set)); + + set.set(64); + assertFalse(transactions.isSubsetOf(0, set)); + assertFalse(transactions.isSubsetOf(1, set)); + assertFalse(transactions.isSubsetOf(2, set)); + set.clear(64); + + set.set(258); + assertFalse(transactions.isSubsetOf(0, set)); + assertFalse(transactions.isSubsetOf(1, set)); + assertFalse(transactions.isSubsetOf(2, set)); + set.clear(258); + + set.set(400); + assertFalse(transactions.isSubsetOf(0, set)); + assertFalse(transactions.isSubsetOf(1, set)); + assertFalse(transactions.isSubsetOf(2, set)); + set.clear(400); + + set.set(99); + assertFalse(transactions.isSubsetOf(0, set)); + assertFalse(transactions.isSubsetOf(1, set)); + assertTrue(transactions.isSubsetOf(2, set)); + } + } + +} From 1c56d68f49dd6e39708072b6ccde949fd94f8da6 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 4 Aug 2022 09:23:42 +0200 Subject: [PATCH 097/265] Adjust wording in frozen tier allocation deciders (#88843) The allocation deciders for dedicated/non-dedicated frozen nodes use the "frozen searchable snapshot" terms for what was renamed later (in #72699) to partially mounted indices. Hopefully not controversial, this changes makes the wording of the deciders more coherent with the current documentation. 
--- .../shared/PartiallyCachedShardAllocationIntegTests.java | 2 +- .../decider/DedicatedFrozenNodeAllocationDecider.java | 4 ++-- .../allocation/decider/HasFrozenCacheAllocationDecider.java | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java index bc1edfe1e20ac..7b2cffb3561e0 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java @@ -124,7 +124,7 @@ public void testPartialSearchableSnapshotNotAllocatedToNodesWithoutCache() throw .stream() .anyMatch( d -> d.getExplanation().contains(SHARED_CACHE_SIZE_SETTING.getKey()) - && d.getExplanation().contains("frozen searchable snapshot shards cannot be allocated to this node") + && d.getExplanation().contains("shards of partially mounted indices cannot be allocated to this node") ) ); } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java index cf9159d3f5af5..14c757220aa51 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/DedicatedFrozenNodeAllocationDecider.java @@ -31,7 +31,7 @@ public class DedicatedFrozenNodeAllocationDecider extends AllocationDecider { private static final Decision YES_IS_PARTIAL_SEARCHABLE_SNAPSHOT = Decision.single( Decision.Type.YES, NAME, - "this index is a frozen searchable snapshot so it can be assigned to this dedicated frozen node" + "this index is a partially mounted index so it can be assigned to this dedicated frozen node" ); private static final Decision NO = Decision.single( @@ -39,7 +39,7 @@ public class DedicatedFrozenNodeAllocationDecider extends AllocationDecider { NAME, "this node's data roles are exactly [" + DATA_FROZEN_NODE_ROLE.roleName() - + "] so it may only hold shards from frozen searchable snapshots, but this index is not a frozen searchable snapshot" + + "] so it may only hold shards from partially mounted indices, but this index is not a partially mounted index" ); @Override diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java index d16f5483aab09..93518d96173ff 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java +++ 
b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/HasFrozenCacheAllocationDecider.java @@ -31,7 +31,7 @@ public class HasFrozenCacheAllocationDecider extends AllocationDecider { private static final Decision HAS_FROZEN_CACHE = Decision.single( Decision.Type.YES, NAME, - "this node has a frozen searchable snapshot shard cache" + "this node has a searchable snapshot shared cache" ); private static final Decision NO_FROZEN_CACHE = Decision.single( @@ -39,13 +39,13 @@ public class HasFrozenCacheAllocationDecider extends AllocationDecider { NAME, "node setting [" + SHARED_CACHE_SIZE_SETTING.getKey() - + "] is set to zero, so frozen searchable snapshot shards cannot be allocated to this node" + + "] is set to zero, so shards of partially mounted indices cannot be allocated to this node" ); private static final Decision UNKNOWN_FROZEN_CACHE = Decision.single( Decision.Type.NO, NAME, - "there was an error fetching the frozen cache state from this node" + "there was an error fetching the searchable snapshot shared cache state from this node" ); private final FrozenCacheInfoService frozenCacheService; From 3909b5eaf92b1147de2362b46f34359f674470f4 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 4 Aug 2022 09:51:16 +0200 Subject: [PATCH 098/265] Add verification metadata for dependencies (#88814) Removing the custom dependency checksum functionality in favor of Gradle build-in dependency verification support. - Use sha256 in favor of sha1 as sha1 is not considered safe these days. Closes https://github.com/elastic/elasticsearch/issues/69736 --- BUILDING.md | 32 + .../DependencyLicensesPrecommitPlugin.java | 3 - .../precommit/DependencyLicensesTask.java | 99 +- .../internal/precommit/UpdateShasTask.java | 78 - .../DependencyLicensesTaskTests.java | 72 +- .../precommit/UpdateShasTaskTests.java | 146 - .../rest/licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/httpasyncclient-4.1.5.jar.sha1 | 1 - .../rest/licenses/httpclient-4.5.13.jar.sha1 | 1 - client/rest/licenses/httpcore-4.4.13.jar.sha1 | 1 - .../licenses/httpcore-nio-4.4.13.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/httpclient-4.5.13.jar.sha1 | 1 - .../sniffer/licenses/httpcore-4.4.13.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../licenses/jansi-2.4.0.jar.sha1 | 1 - .../licenses/bc-fips-1.0.2.jar.sha1 | 1 - .../licenses/bcpg-fips-1.0.4.jar.sha1 | 1 - gradle.properties | 3 + gradle/build.versions.toml | 4 +- gradle/verification-metadata.xml | 3600 +++++++++++++++++ libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 | 1 - libs/grok/licenses/jcodings-1.0.44.jar.sha1 | 1 - libs/grok/licenses/joni-2.1.29.jar.sha1 | 1 - libs/lz4/licenses/lz4-java-1.8.0.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../jackson-dataformat-cbor-2.13.2.jar.sha1 | 1 - .../jackson-dataformat-smile-2.13.2.jar.sha1 | 1 - .../jackson-dataformat-yaml-2.13.2.jar.sha1 | 1 - .../impl/licenses/snakeyaml-1.30.jar.sha1 | 1 - .../licenses/SparseBitSet-1.2.jar.sha1 | 1 - .../apache-mime4j-core-0.8.5.jar.sha1 | 1 - .../licenses/apache-mime4j-dom-0.8.5.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../commons-collections4-4.1.jar.sha1 | 1 - .../licenses/commons-compress-1.21.jar.sha1 | 1 - .../licenses/commons-io-2.11.0.jar.sha1 | 1 - .../licenses/commons-lang3-3.9.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - 
.../licenses/commons-math3-3.6.1.jar.sha1 | 1 - .../licenses/fontbox-2.0.26.jar.sha1 | 1 - .../licenses/jempbox-1.8.16.jar.sha1 | 1 - .../licenses/juniversalchardet-1.0.3.jar.sha1 | 1 - .../licenses/pdfbox-2.0.26.jar.sha1 | 1 - .../licenses/poi-5.2.2.jar.sha1 | 1 - .../licenses/poi-ooxml-5.2.2.jar.sha1 | 1 - .../licenses/poi-ooxml-lite-5.2.2.jar.sha1 | 1 - .../licenses/poi-scratchpad-5.2.2.jar.sha1 | 1 - .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../licenses/tagsoup-1.2.1.jar.sha1 | 1 - .../licenses/tika-core-2.4.0.jar.sha1 | 1 - .../tika-langdetect-tika-2.4.0.jar.sha1 | 1 - .../tika-parser-apple-module-2.4.0.jar.sha1 | 1 - .../tika-parser-html-module-2.4.0.jar.sha1 | 1 - ...ika-parser-microsoft-module-2.4.0.jar.sha1 | 1 - ...ka-parser-miscoffice-module-2.4.0.jar.sha1 | 1 - .../tika-parser-pdf-module-2.4.0.jar.sha1 | 1 - .../tika-parser-text-module-2.4.0.jar.sha1 | 1 - .../tika-parser-xml-module-2.4.0.jar.sha1 | 1 - .../tika-parser-xmp-commons-2.4.0.jar.sha1 | 1 - .../tika-parser-zip-commons-2.4.0.jar.sha1 | 1 - .../licenses/xmlbeans-5.0.3.jar.sha1 | 1 - .../licenses/xz-1.8.jar.sha1 | 1 - .../licenses/httpclient-4.5.13.jar.sha1 | 1 - .../licenses/httpcore-4.4.13.jar.sha1 | 1 - .../licenses/geoip2-3.0.0.jar.sha1 | 1 - .../jackson-annotations-2.13.1.jar.sha1 | 1 - .../licenses/jackson-core-2.13.1.jar.sha1 | 1 - .../licenses/jackson-databind-2.13.1.jar.sha1 | 1 - .../licenses/maxmind-db-2.0.0.jar.sha1 | 1 - .../licenses/antlr4-runtime-4.5.1-1.jar.sha1 | 1 - .../lang-expression/licenses/asm-7.2.jar.sha1 | 1 - .../licenses/asm-analysis-7.2.jar.sha1 | 1 - .../licenses/asm-commons-7.2.jar.sha1 | 1 - .../licenses/asm-tree-7.2.jar.sha1 | 1 - .../licenses/compiler-0.9.10.jar.sha1 | 1 - .../licenses/antlr4-runtime-4.5.3.jar.sha1 | 1 - .../lang-painless/licenses/asm-7.2.jar.sha1 | 1 - .../licenses/asm-analysis-7.2.jar.sha1 | 1 - .../licenses/asm-commons-7.2.jar.sha1 | 1 - .../licenses/asm-tree-7.2.jar.sha1 | 1 - .../licenses/asm-util-7.2.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../licenses/jts-core-1.15.0.jar.sha1 | 1 - .../s2-geometry-library-java-1.0.1.jar.sha1 | 1 - .../licenses/spatial4j-0.7.jar.sha1 | 1 - .../licenses/azure-core-1.27.0.jar.sha1 | 1 - .../azure-core-http-netty-1.11.9.jar.sha1 | 1 - .../azure-storage-blob-12.16.0.jar.sha1 | 1 - .../azure-storage-common-12.15.1.jar.sha1 | 1 - .../jackson-annotations-2.13.2.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../jackson-databind-2.13.2.2.jar.sha1 | 1 - .../jackson-dataformat-xml-2.13.2.jar.sha1 | 1 - .../jackson-datatype-jsr310-2.13.2.jar.sha1 | 1 - ...on-module-jaxb-annotations-2.13.2.jar.sha1 | 1 - .../jakarta.activation-api-1.2.1.jar.sha1 | 1 - .../jakarta.xml.bind-api-2.3.2.jar.sha1 | 1 - .../licenses/log4j-slf4j-impl-2.18.0.jar.sha1 | 1 - .../netty-buffer-4.1.77.Final.jar.sha1 | 1 - .../netty-codec-4.1.77.Final.jar.sha1 | 1 - .../netty-codec-dns-4.1.77.Final.jar.sha1 | 1 - .../netty-codec-http-4.1.77.Final.jar.sha1 | 1 - .../netty-codec-http2-4.1.77.Final.jar.sha1 | 1 - .../netty-codec-socks-4.1.77.Final.jar.sha1 | 1 - .../netty-common-4.1.77.Final.jar.sha1 | 1 - .../netty-handler-4.1.77.Final.jar.sha1 | 1 - .../netty-handler-proxy-4.1.77.Final.jar.sha1 | 1 - .../netty-resolver-4.1.77.Final.jar.sha1 | 1 - .../netty-resolver-dns-4.1.77.Final.jar.sha1 | 1 - .../netty-transport-4.1.77.Final.jar.sha1 | 1 - ...t-native-unix-common-4.1.77.Final.jar.sha1 | 1 - .../licenses/reactive-streams-1.0.3.jar.sha1 | 1 - .../licenses/reactor-core-3.4.14.jar.sha1 | 1 - .../reactor-netty-core-1.0.15.jar.sha1 | 1 
- .../reactor-netty-http-1.0.15.jar.sha1 | 1 - .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../licenses/stax2-api-4.2.1.jar.sha1 | 1 - .../licenses/woodstox-core-6.2.7.jar.sha1 | 1 - .../licenses/api-common-2.2.1.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/failureaccess-1.0.1.jar.sha1 | 1 - .../licenses/gax-2.0.0.jar.sha1 | 1 - .../licenses/gax-httpjson-0.85.0.jar.sha1 | 1 - .../google-api-client-1.35.1.jar.sha1 | 1 - ...ces-storage-v1-rev20210127-1.32.1.jar.sha1 | 1 - ...le-auth-library-credentials-1.0.0.jar.sha1 | 1 - ...le-auth-library-oauth2-http-1.0.0.jar.sha1 | 1 - .../licenses/google-cloud-core-2.0.2.jar.sha1 | 1 - .../google-cloud-core-http-2.0.2.jar.sha1 | 1 - .../google-cloud-storage-1.118.1.jar.sha1 | 1 - .../google-http-client-1.39.2.jar.sha1 | 1 - ...ogle-http-client-appengine-1.39.2.jar.sha1 | 1 - .../google-http-client-gson-1.39.2.jar.sha1 | 1 - ...oogle-http-client-jackson2-1.39.2.jar.sha1 | 1 - .../google-oauth-client-1.34.1.jar.sha1 | 1 - .../licenses/grpc-context-1.39.0.jar.sha1 | 1 - .../licenses/gson-2.8.9.jar.sha1 | 1 - .../licenses/guava-30.1.1-jre.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../licenses/opencensus-api-0.28.0.jar.sha1 | 1 - ...encensus-contrib-http-util-0.28.0.jar.sha1 | 1 - .../proto-google-common-protos-2.3.2.jar.sha1 | 1 - .../proto-google-iam-v1-1.0.14.jar.sha1 | 1 - .../licenses/protobuf-java-3.21.1.jar.sha1 | 1 - .../protobuf-java-util-3.17.3.jar.sha1 | 1 - .../licenses/threetenbp-1.5.1.jar.sha1 | 1 - .../aws-java-sdk-core-1.11.749.jar.sha1 | 1 - .../aws-java-sdk-s3-1.11.749.jar.sha1 | 1 - .../aws-java-sdk-sts-1.11.749.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/httpclient-4.5.13.jar.sha1 | 1 - .../licenses/httpcore-4.4.13.jar.sha1 | 1 - .../jackson-annotations-2.13.2.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../licenses/jackson-databind-2.13.2.jar.sha1 | 1 - .../jackson-dataformat-cbor-2.13.2.jar.sha1 | 1 - .../licenses/jaxb-api-2.2.2.jar.sha1 | 1 - .../licenses/jmespath-java-1.11.749.jar.sha1 | 1 - .../licenses/joda-time-2.8.1.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/httpclient-4.5.13.jar.sha1 | 1 - .../licenses/httpcore-4.4.13.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../netty-buffer-4.1.77.Final.jar.sha1 | 1 - .../netty-codec-4.1.77.Final.jar.sha1 | 1 - .../netty-codec-http-4.1.77.Final.jar.sha1 | 1 - .../netty-common-4.1.77.Final.jar.sha1 | 1 - .../netty-handler-4.1.77.Final.jar.sha1 | 1 - .../netty-resolver-4.1.77.Final.jar.sha1 | 1 - .../netty-transport-4.1.77.Final.jar.sha1 | 1 - .../analysis-icu/licenses/icu4j-68.2.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/morfologik-fsa-2.1.1.jar.sha1 | 1 - .../morfologik-stemming-2.1.1.jar.sha1 | 1 - ...morfologik-ukrainian-search-3.7.5.jar.sha1 | 1 - .../licenses/azure-core-0.9.3.jar.sha1 | 1 - .../azure-svc-mgmt-compute-0.9.3.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-io-2.4.jar.sha1 | 1 - .../licenses/commons-lang-2.6.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/httpclient-4.5.13.jar.sha1 | 1 - .../licenses/httpcore-4.4.13.jar.sha1 | 1 - .../licenses/javax.inject-1.jar.sha1 | 1 - 
.../licenses/jaxb-api-2.2.2.jar.sha1 | 1 - .../licenses/jaxb-impl-2.2.3-1.jar.sha1 | 1 - .../licenses/jersey-client-1.13.jar.sha1 | 1 - .../licenses/jersey-core-1.13.jar.sha1 | 1 - .../licenses/joda-time-2.10.10.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../licenses/mail-1.4.5.jar.sha1 | 1 - .../aws-java-sdk-core-1.11.749.jar.sha1 | 1 - .../aws-java-sdk-ec2-1.11.749.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/httpclient-4.5.13.jar.sha1 | 1 - .../licenses/httpcore-4.4.13.jar.sha1 | 1 - .../jackson-annotations-2.13.2.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../licenses/jackson-databind-2.13.2.jar.sha1 | 1 - .../jackson-dataformat-cbor-2.13.2.jar.sha1 | 1 - .../licenses/joda-time-2.10.10.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/failureaccess-1.0.1.jar.sha1 | 1 - .../google-api-client-1.33.1.jar.sha1 | 1 - ...ces-compute-v1-rev20220322-1.32.1.jar.sha1 | 1 - .../google-http-client-1.41.1.jar.sha1 | 1 - .../google-http-client-gson-1.41.1.jar.sha1 | 1 - ...oogle-http-client-jackson2-1.41.1.jar.sha1 | 1 - .../google-oauth-client-1.33.0.jar.sha1 | 1 - .../licenses/grpc-context-1.27.2.jar.sha1 | 1 - .../licenses/guava-31.0.1-jre.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../licenses/jsr305-3.0.2.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../licenses/opencensus-api-0.30.0.jar.sha1 | 1 - ...encensus-contrib-http-util-0.30.0.jar.sha1 | 1 - .../licenses/hadoop-client-api-3.3.3.jar.sha1 | 1 - .../licenses/commons-cli-1.2.jar.sha1 | 1 - .../licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-io-2.8.0.jar.sha1 | 1 - .../licenses/commons-lang3-3.11.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../hadoop-client-runtime-3.3.3.jar.sha1 | 1 - .../licenses/hadoop-hdfs-3.3.3.jar.sha1 | 1 - .../licenses/javax.servlet-api-3.1.0.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../licenses/log4j-slf4j-impl-2.18.0.jar.sha1 | 1 - .../licenses/protobuf-java-3.4.0.jar.sha1 | 1 - .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 - server/licenses/HdrHistogram-2.1.9.jar.sha1 | 1 - .../licenses/ecs-logging-core-1.2.0.jar.sha1 | 1 - server/licenses/hppc-0.8.1.jar.sha1 | 1 - server/licenses/jna-5.10.0.jar.sha1 | 1 - server/licenses/log4j-api-2.18.0.jar.sha1 | 1 - server/licenses/log4j-core-2.18.0.jar.sha1 | 1 - .../licenses/log4j2-ecs-layout-1.2.0.jar.sha1 | 1 - server/licenses/t-digest-3.2.jar.sha1 | 1 - .../licenses/commons-compress-1.21.jar.sha1 | 1 - .../licenses/commons-lang3-3.9.jar.sha1 | 1 - .../jackson-annotations-2.13.2.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../licenses/jackson-databind-2.13.2.jar.sha1 | 1 - .../json-schema-validator-1.0.48.jar.sha1 | 1 - .../licenses/commons-math3-3.6.1.jar.sha1 | 1 - .../core/licenses/commons-codec-1.14.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/httpasyncclient-4.1.5.jar.sha1 | 1 - .../core/licenses/httpclient-4.5.13.jar.sha1 | 1 - .../core/licenses/httpcore-4.4.13.jar.sha1 | 1 - .../licenses/httpcore-nio-4.4.13.jar.sha1 | 1 - .../licenses/log4j-1.2-api-2.18.0.jar.sha1 | 1 - .../licenses/unboundid-ldapsdk-6.0.3.jar.sha1 | 1 - .../qa/common/licenses/jtoml-2.0.0.jar.sha1 | 1 - .../licenses/cryptacular-1.2.4.jar.sha1 | 1 - .../licenses/failureaccess-1.0.1.jar.sha1 | 1 - 
.../licenses/guava-28.2-jre.jar.sha1 | 1 - .../licenses/httpclient-cache-4.5.13.jar.sha1 | 1 - .../licenses/java-support-8.0.0.jar.sha1 | 1 - .../licenses/jsr305-3.0.2.jar.sha1 | 1 - .../licenses/log4j-slf4j-impl-2.18.0.jar.sha1 | 1 - .../licenses/metrics-core-4.1.4.jar.sha1 | 1 - .../licenses/opensaml-core-4.0.1.jar.sha1 | 1 - .../opensaml-messaging-api-4.0.1.jar.sha1 | 1 - .../opensaml-messaging-impl-4.0.1.jar.sha1 | 1 - .../opensaml-profile-api-4.0.1.jar.sha1 | 1 - .../opensaml-profile-impl-4.0.1.jar.sha1 | 1 - .../licenses/opensaml-saml-api-4.0.1.jar.sha1 | 1 - .../opensaml-saml-impl-4.0.1.jar.sha1 | 1 - .../opensaml-security-api-4.0.1.jar.sha1 | 1 - .../opensaml-security-impl-4.0.1.jar.sha1 | 1 - .../licenses/opensaml-soap-api-4.0.1.jar.sha1 | 1 - .../opensaml-soap-impl-4.0.1.jar.sha1 | 1 - .../opensaml-storage-api-4.0.1.jar.sha1 | 1 - .../opensaml-storage-impl-4.0.1.jar.sha1 | 1 - .../opensaml-xmlsec-api-4.0.1.jar.sha1 | 1 - .../opensaml-xmlsec-impl-4.0.1.jar.sha1 | 1 - .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../licenses/xmlsec-2.1.4.jar.sha1 | 1 - .../ml/licenses/commons-math3-3.6.1.jar.sha1 | 1 - x-pack/plugin/ml/licenses/icu4j-68.2.jar.sha1 | 1 - .../plugin/ml/licenses/ojalgo-51.2.0.jar.sha1 | 1 - .../ql/licenses/antlr4-runtime-4.9.2.jar.sha1 | 1 - .../cli/licenses/bcpkix-jdk15on-1.64.jar.sha1 | 1 - .../cli/licenses/bcprov-jdk15on-1.64.jar.sha1 | 1 - .../cli/licenses/commons-io-2.5.jar.sha1 | 1 - .../licenses/accessors-smart-2.4.2.jar.sha1 | 1 - .../security/licenses/asm-8.0.1.jar.sha1 | 1 - .../licenses/cryptacular-1.2.4.jar.sha1 | 1 - .../licenses/failureaccess-1.0.1.jar.sha1 | 1 - .../security/licenses/guava-28.2-jre.jar.sha1 | 1 - .../licenses/httpclient-cache-4.5.13.jar.sha1 | 1 - .../licenses/jakarta.mail-1.6.3.jar.sha1 | 1 - .../licenses/java-support-8.0.0.jar.sha1 | 1 - .../licenses/jcip-annotations-1.0.jar.sha1 | 1 - .../licenses/joda-time-2.10.10.jar.sha1 | 1 - .../licenses/json-smart-2.4.2.jar.sha1 | 1 - .../security/licenses/jsr305-3.0.2.jar.sha1 | 1 - .../security/licenses/lang-tag-1.4.4.jar.sha1 | 1 - .../licenses/log4j-slf4j-impl-2.18.0.jar.sha1 | 1 - .../licenses/metrics-core-4.1.4.jar.sha1 | 1 - .../licenses/nimbus-jose-jwt-9.8.1.jar.sha1 | 1 - .../licenses/oauth2-oidc-sdk-9.3.1.jar.sha1 | 1 - .../licenses/opensaml-core-4.0.1.jar.sha1 | 1 - .../opensaml-messaging-api-4.0.1.jar.sha1 | 1 - .../opensaml-messaging-impl-4.0.1.jar.sha1 | 1 - .../opensaml-profile-api-4.0.1.jar.sha1 | 1 - .../opensaml-profile-impl-4.0.1.jar.sha1 | 1 - .../licenses/opensaml-saml-api-4.0.1.jar.sha1 | 1 - .../opensaml-saml-impl-4.0.1.jar.sha1 | 1 - .../opensaml-security-api-4.0.1.jar.sha1 | 1 - .../opensaml-security-impl-4.0.1.jar.sha1 | 1 - .../licenses/opensaml-soap-api-4.0.1.jar.sha1 | 1 - .../opensaml-soap-impl-4.0.1.jar.sha1 | 1 - .../opensaml-storage-api-4.0.1.jar.sha1 | 1 - .../opensaml-storage-impl-4.0.1.jar.sha1 | 1 - .../opensaml-xmlsec-api-4.0.1.jar.sha1 | 1 - .../opensaml-xmlsec-impl-4.0.1.jar.sha1 | 1 - .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../security/licenses/xmlsec-2.1.4.jar.sha1 | 1 - .../licenses/jline-reader-3.21.0.jar.sha1 | 1 - .../licenses/jline-style-3.21.0.jar.sha1 | 1 - .../licenses/jline-terminal-3.21.0.jar.sha1 | 1 - .../jline-terminal-jna-3.21.0.jar.sha1 | 1 - .../sql/sql-cli/licenses/jna-5.10.0.jar.sha1 | 1 - .../licenses/jackson-core-2.13.2.jar.sha1 | 1 - .../jackson-dataformat-cbor-2.13.2.jar.sha1 | 1 - .../licenses/icu4j-68.2.jar.sha1 | 1 - .../licenses/super-csv-2.4.0.jar.sha1 | 1 - .../licenses/log4j-slf4j-impl-2.18.0.jar.sha1 | 1 - 
.../mapbox-vector-tile-3.1.0.jar.sha1 | 1 - .../licenses/protobuf-java-3.16.1.jar.sha1 | 1 - .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../licenses/failureaccess-1.0.1.jar.sha1 | 1 - .../watcher/licenses/guava-27.1-jre.jar.sha1 | 1 - .../jakarta.activation-1.2.1.jar.sha1 | 1 - .../licenses/jakarta.mail-1.6.4.jar.sha1 | 1 - ...sp-java-html-sanitizer-20211018.2.jar.sha1 | 1 - 348 files changed, 3642 insertions(+), 734 deletions(-) delete mode 100644 build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTask.java delete mode 100644 build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java delete mode 100644 client/rest/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 client/rest/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 delete mode 100644 client/rest/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 client/rest/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 client/rest/licenses/httpcore-nio-4.4.13.jar.sha1 delete mode 100644 client/sniffer/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 client/sniffer/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 client/sniffer/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 client/sniffer/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 distribution/tools/ansi-console/licenses/jansi-2.4.0.jar.sha1 delete mode 100644 distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.jar.sha1 delete mode 100644 distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.4.jar.sha1 create mode 100644 gradle/verification-metadata.xml delete mode 100644 libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 delete mode 100644 libs/grok/licenses/jcodings-1.0.44.jar.sha1 delete mode 100644 libs/grok/licenses/joni-2.1.29.jar.sha1 delete mode 100644 libs/lz4/licenses/lz4-java-1.8.0.jar.sha1 delete mode 100644 libs/x-content/impl/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 libs/x-content/impl/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 delete mode 100644 libs/x-content/impl/licenses/jackson-dataformat-smile-2.13.2.jar.sha1 delete mode 100644 libs/x-content/impl/licenses/jackson-dataformat-yaml-2.13.2.jar.sha1 delete mode 100644 libs/x-content/impl/licenses/snakeyaml-1.30.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/apache-mime4j-core-0.8.5.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/apache-mime4j-dom-0.8.5.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/commons-compress-1.21.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/commons-io-2.11.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/commons-lang3-3.9.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/commons-math3-3.6.1.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/fontbox-2.0.26.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/jempbox-1.8.16.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/juniversalchardet-1.0.3.jar.sha1 delete mode 100644 
modules/ingest-attachment/licenses/pdfbox-2.0.26.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/poi-5.2.2.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-langdetect-tika-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-apple-module-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-html-module-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-microsoft-module-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-miscoffice-module-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-pdf-module-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-text-module-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-xml-module-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-xmp-commons-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/tika-parser-zip-commons-2.4.0.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 delete mode 100644 modules/ingest-attachment/licenses/xz-1.8.jar.sha1 delete mode 100644 modules/ingest-common/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 modules/ingest-common/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/geoip2-3.0.0.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.13.1.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-core-2.13.1.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.1.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 delete mode 100644 modules/lang-expression/licenses/antlr4-runtime-4.5.1-1.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-7.2.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-analysis-7.2.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-commons-7.2.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-7.2.jar.sha1 delete mode 100644 modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 delete mode 100644 modules/lang-painless/licenses/antlr4-runtime-4.5.3.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-7.2.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-7.2.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-7.2.jar.sha1 delete mode 100644 modules/legacy-geo/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 modules/legacy-geo/licenses/jts-core-1.15.0.jar.sha1 delete mode 100644 modules/legacy-geo/licenses/s2-geometry-library-java-1.0.1.jar.sha1 delete mode 100644 modules/legacy-geo/licenses/spatial4j-0.7.jar.sha1 delete mode 100644 modules/repository-azure/licenses/azure-core-1.27.0.jar.sha1 delete mode 100644 
modules/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 delete mode 100644 modules/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 delete mode 100644 modules/repository-azure/licenses/azure-storage-common-12.15.1.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jackson-annotations-2.13.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jackson-dataformat-xml-2.13.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jackson-datatype-jsr310-2.13.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jackson-module-jaxb-annotations-2.13.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jakarta.activation-api-1.2.1.jar.sha1 delete mode 100644 modules/repository-azure/licenses/jakarta.xml.bind-api-2.3.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-buffer-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-codec-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-codec-dns-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-codec-http-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-codec-http2-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-codec-socks-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-common-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-handler-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-handler-proxy-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-resolver-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-resolver-dns-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-transport-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/netty-transport-native-unix-common-4.1.77.Final.jar.sha1 delete mode 100644 modules/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 delete mode 100644 modules/repository-azure/licenses/reactor-core-3.4.14.jar.sha1 delete mode 100644 modules/repository-azure/licenses/reactor-netty-core-1.0.15.jar.sha1 delete mode 100644 modules/repository-azure/licenses/reactor-netty-http-1.0.15.jar.sha1 delete mode 100644 modules/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 delete mode 100644 modules/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 delete mode 100644 modules/repository-azure/licenses/woodstox-core-6.2.7.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/api-common-2.2.1.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/failureaccess-1.0.1.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/gax-2.0.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/gax-httpjson-0.85.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-api-client-1.35.1.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-api-services-storage-v1-rev20210127-1.32.1.jar.sha1 delete mode 100644 
modules/repository-gcs/licenses/google-auth-library-credentials-1.0.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-auth-library-oauth2-http-1.0.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-cloud-core-2.0.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-cloud-core-http-2.0.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-cloud-storage-1.118.1.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-http-client-1.39.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-http-client-appengine-1.39.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-http-client-gson-1.39.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-http-client-jackson2-1.39.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/grpc-context-1.39.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/gson-2.8.9.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/guava-30.1.1-jre.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/opencensus-api-0.28.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/opencensus-contrib-http-util-0.28.0.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/proto-google-common-protos-2.3.2.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/proto-google-iam-v1-1.0.14.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/protobuf-java-3.21.1.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/protobuf-java-util-3.17.3.jar.sha1 delete mode 100644 modules/repository-gcs/licenses/threetenbp-1.5.1.jar.sha1 delete mode 100644 modules/repository-s3/licenses/aws-java-sdk-core-1.11.749.jar.sha1 delete mode 100644 modules/repository-s3/licenses/aws-java-sdk-s3-1.11.749.jar.sha1 delete mode 100644 modules/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 delete mode 100644 modules/repository-s3/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 modules/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 modules/repository-s3/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 modules/repository-s3/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 modules/repository-s3/licenses/jackson-annotations-2.13.2.jar.sha1 delete mode 100644 modules/repository-s3/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 modules/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 delete mode 100644 modules/repository-s3/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 delete mode 100644 modules/repository-s3/licenses/jaxb-api-2.2.2.jar.sha1 delete mode 100644 modules/repository-s3/licenses/jmespath-java-1.11.749.jar.sha1 delete mode 100644 modules/repository-s3/licenses/joda-time-2.8.1.jar.sha1 delete mode 100644 modules/repository-s3/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 modules/repository-url/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 modules/repository-url/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 modules/repository-url/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 modules/repository-url/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 modules/repository-url/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 
modules/transport-netty4/licenses/netty-buffer-4.1.77.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.77.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.77.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.77.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.77.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.77.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.77.Final.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/icu4j-68.2.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.1.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.1.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/azure-core-0.9.3.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/commons-io-2.4.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/commons-lang-2.6.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/javax.inject-1.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/jaxb-impl-2.2.3-1.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/jersey-client-1.13.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/jersey-core-1.13.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/joda-time-2.10.10.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.749.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.749.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.13.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/joda-time-2.10.10.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 
plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/failureaccess-1.0.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-api-client-1.33.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20220322-1.32.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-http-client-1.41.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-http-client-gson-1.41.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-http-client-jackson2-1.41.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.33.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/grpc-context-1.27.2.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/guava-31.0.1-jre.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/jsr305-3.0.2.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/opencensus-api-0.30.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.30.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/hadoop-client-api/licenses/hadoop-client-api-3.3.3.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-io-2.8.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.11.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/javax.servlet-api-3.1.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.4.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 delete mode 100644 server/licenses/HdrHistogram-2.1.9.jar.sha1 delete mode 100644 server/licenses/ecs-logging-core-1.2.0.jar.sha1 delete mode 100644 server/licenses/hppc-0.8.1.jar.sha1 delete mode 100644 server/licenses/jna-5.10.0.jar.sha1 delete mode 100644 server/licenses/log4j-api-2.18.0.jar.sha1 delete mode 100644 server/licenses/log4j-core-2.18.0.jar.sha1 delete mode 100644 server/licenses/log4j2-ecs-layout-1.2.0.jar.sha1 delete mode 100644 server/licenses/t-digest-3.2.jar.sha1 delete mode 100644 test/x-content/licenses/commons-compress-1.21.jar.sha1 delete mode 100644 test/x-content/licenses/commons-lang3-3.9.jar.sha1 delete mode 100644 test/x-content/licenses/jackson-annotations-2.13.2.jar.sha1 delete mode 100644 test/x-content/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 test/x-content/licenses/jackson-databind-2.13.2.jar.sha1 delete mode 100644 test/x-content/licenses/json-schema-validator-1.0.48.jar.sha1 delete mode 100644 x-pack/plugin/analytics/licenses/commons-math3-3.6.1.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/commons-codec-1.14.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/commons-logging-1.1.3.jar.sha1 delete mode 100644 
x-pack/plugin/core/licenses/httpasyncclient-4.1.5.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/httpclient-4.5.13.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/httpcore-4.4.13.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/httpcore-nio-4.4.13.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/log4j-1.2-api-2.18.0.jar.sha1 delete mode 100644 x-pack/plugin/core/licenses/unboundid-ldapsdk-6.0.3.jar.sha1 delete mode 100644 x-pack/plugin/eql/qa/common/licenses/jtoml-2.0.0.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/cryptacular-1.2.4.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/failureaccess-1.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/guava-28.2-jre.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/httpclient-cache-4.5.13.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/java-support-8.0.0.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/jsr305-3.0.2.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/metrics-core-4.1.4.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-core-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-messaging-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-profile-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-profile-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-saml-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-saml-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-security-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-security-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-soap-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-soap-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-storage-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-storage-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/slf4j-api-1.6.2.jar.sha1 delete mode 100644 x-pack/plugin/identity-provider/licenses/xmlsec-2.1.4.jar.sha1 delete mode 100644 x-pack/plugin/ml/licenses/commons-math3-3.6.1.jar.sha1 delete mode 100644 x-pack/plugin/ml/licenses/icu4j-68.2.jar.sha1 delete mode 100644 x-pack/plugin/ml/licenses/ojalgo-51.2.0.jar.sha1 delete mode 100644 x-pack/plugin/ql/licenses/antlr4-runtime-4.9.2.jar.sha1 delete mode 100644 x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.64.jar.sha1 delete mode 100644 x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.64.jar.sha1 delete mode 100644 x-pack/plugin/security/cli/licenses/commons-io-2.5.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/accessors-smart-2.4.2.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/asm-8.0.1.jar.sha1 delete mode 100644 
x-pack/plugin/security/licenses/cryptacular-1.2.4.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/failureaccess-1.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/guava-28.2-jre.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/httpclient-cache-4.5.13.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/java-support-8.0.0.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/joda-time-2.10.10.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/json-smart-2.4.2.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/jsr305-3.0.2.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/metrics-core-4.1.4.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/nimbus-jose-jwt-9.8.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/oauth2-oidc-sdk-9.3.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-core-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-messaging-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-profile-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-profile-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-saml-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-saml-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-security-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-security-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-soap-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-soap-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-storage-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-storage-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/slf4j-api-1.6.2.jar.sha1 delete mode 100644 x-pack/plugin/security/licenses/xmlsec-2.1.4.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.21.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-cli/licenses/jline-style-3.21.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.21.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.21.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-cli/licenses/jna-5.10.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-proto/licenses/jackson-core-2.13.2.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-proto/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 delete mode 100644 x-pack/plugin/text-structure/licenses/icu4j-68.2.jar.sha1 delete mode 100644 x-pack/plugin/text-structure/licenses/super-csv-2.4.0.jar.sha1 delete mode 100644 x-pack/plugin/vector-tile/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 delete mode 100644 x-pack/plugin/vector-tile/licenses/mapbox-vector-tile-3.1.0.jar.sha1 delete mode 
100644 x-pack/plugin/vector-tile/licenses/protobuf-java-3.16.1.jar.sha1 delete mode 100644 x-pack/plugin/vector-tile/licenses/slf4j-api-1.6.2.jar.sha1 delete mode 100644 x-pack/plugin/watcher/licenses/failureaccess-1.0.1.jar.sha1 delete mode 100644 x-pack/plugin/watcher/licenses/guava-27.1-jre.jar.sha1 delete mode 100644 x-pack/plugin/watcher/licenses/jakarta.activation-1.2.1.jar.sha1 delete mode 100644 x-pack/plugin/watcher/licenses/jakarta.mail-1.6.4.jar.sha1 delete mode 100644 x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-20211018.2.jar.sha1
diff --git a/BUILDING.md b/BUILDING.md
index 4d82791ce9413..7d3261c0327d1 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -63,6 +63,38 @@ E.g. [configuration-cache support](https://github.com/elastic/elasticsearch/issu
 There are a few guidelines to follow that should make your life easier to make changes to the elasticsearch build. Please add a member of the `es-delivery` team as a reviewer if you're making non-trivial changes to the build.
 
+#### Adding or updating a dependency
+
+We rely on [Gradle dependency verification](https://docs.gradle.org/current/userguide/dependency_verification.html) to mitigate the security risk of integrating compromised dependencies.
+
+This requires third-party dependencies and their checksums to be listed in `gradle/verification-metadata.xml`.
+
+For updated or newly added dependencies you need to add an entry to this verification file, or update the existing one:
+```
+<component group="GROUP" name="NAME" version="VERSION">
+   <artifact name="NAME-VERSION.jar">
+      <sha256 value="SHA-256-CHECKSUM" origin="official site"/>
+   </artifact>
+</component>
+```
+
+You can also automate the generation of this entry by running your build with the `--write-verification-metadata` command-line option:
+```
+>./gradlew --write-verification-metadata sha256 precommit
+```
+
+The `--write-verification-metadata` Gradle option is generally able to resolve reachable configurations,
+but we use detached configurations for a certain set of plugins and tasks. Therefore, please ensure you run this option with a task that
+uses the changed dependencies. In most cases, `precommit` or `check` are good candidates.
+
+We prefer sha256 checksums, as md5 and sha1 are no longer considered safe. The generated entry
+will have its `origin` attribute set to `Generated by Gradle`.
+
+>A manual confirmation of the Gradle-generated checksums is currently not mandatory.
+>If you want to add a level of verification, you can manually confirm the checksum (e.g. by looking it up on the website of the library).
+>In that case, please replace the content of the `origin` attribute with `official site`.
+>
+
 #### Custom Plugin and Task implementations
 
 Build logic that is used across multiple subprojects should considered to be moved into a Gradle plugin with according Gradle task implmentation.
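To make the BUILDING.md guidance above concrete, here is a sketch of the kind of entry a `./gradlew --write-verification-metadata sha256 precommit` run writes into `gradle/verification-metadata.xml`. The dependency coordinates and the checksum value below are illustrative placeholders, not entries taken from this patch:

```
<component group="org.example" name="example-lib" version="1.0.0">
   <artifact name="example-lib-1.0.0.jar">
      <!-- checksum written by Gradle; the origin attribute marks it as auto-generated -->
      <sha256 value="0000000000000000000000000000000000000000000000000000000000000000" origin="Generated by Gradle"/>
   </artifact>
</component>
```

If that checksum is later confirmed manually against the library's official site, the `origin` attribute is changed to `official site`, as described above.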
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java index 1fbefef45c8e6..3bc6697930198 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java @@ -34,9 +34,6 @@ public TaskProvider createTask(Project project) { runtimeClasspath.fileCollection(dependency -> dependency instanceof ProjectDependency == false).minus(compileOnly) ); }); - - // we also create the updateShas helper task that is associated with dependencyLicenses - project.getTasks().register("updateShas", UpdateShasTask.class, t -> t.setParentTask(dependencyLicenses)); return dependencyLicenses; } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java index 59f8c2da0d718..71de2626d5fca 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.gradle.internal.precommit; -import org.apache.commons.codec.binary.Hex; import org.elasticsearch.gradle.internal.precommit.LicenseAnalyzer.LicenseInfo; import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; @@ -23,30 +22,21 @@ import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputDirectory; import org.gradle.api.tasks.InputFiles; -import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Optional; import org.gradle.api.tasks.OutputDirectory; import org.gradle.api.tasks.TaskAction; import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; import javax.inject.Inject; @@ -193,7 +183,7 @@ public void ignoreFile(String file) { } @TaskAction - public void checkDependencies() throws IOException, NoSuchAlgorithmException { + public void checkDependencies() { if (dependencies == null) { throw new GradleException("No dependencies variable defined."); } @@ -214,12 +204,9 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException { Map licenses = new HashMap<>(); Map notices = new HashMap<>(); Map sources = new HashMap<>(); - Set shaFiles = new HashSet<>(); for (File file : licensesDirAsFile.listFiles()) { String name = file.getName(); - if (name.endsWith(SHA_EXTENSION)) { - shaFiles.add(file); - } else if (name.endsWith("-LICENSE") || name.endsWith("-LICENSE.txt")) { + if (name.endsWith("-LICENSE") || name.endsWith("-LICENSE.txt")) { // TODO: why do we support suffix of LICENSE *and* LICENSE.txt?? 
licenses.put(name, false); } else if (name.contains("-NOTICE") || name.contains("-NOTICE.txt")) { @@ -233,18 +220,13 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException { notices.keySet().removeAll(ignoreFiles); sources.keySet().removeAll(ignoreFiles); - checkDependencies(licenses, notices, sources, shaFiles); + checkDependencies(licenses, notices, sources); licenses.forEach((item, exists) -> failIfAnyMissing(item, exists, "license")); notices.forEach((item, exists) -> failIfAnyMissing(item, exists, "notice")); sources.forEach((item, exists) -> failIfAnyMissing(item, exists, "sources")); - - if (shaFiles.isEmpty() == false) { - throw new GradleException("Unused sha files found: \n" + joinFilenames(shaFiles)); - } - } // This is just a marker output folder to allow this task being up-to-date. @@ -261,18 +243,10 @@ private void failIfAnyMissing(String item, Boolean exists, String type) { } } - private void checkDependencies( - Map licenses, - Map notices, - Map sources, - Set shaFiles - ) throws NoSuchAlgorithmException, IOException { + private void checkDependencies(Map licenses, Map notices, Map sources) { for (File dependency : dependencies) { String jarName = dependency.getName(); String depName = regex.matcher(jarName).replaceFirst(""); - - validateSha(shaFiles, dependency, jarName, depName); - String dependencyName = getDependencyName(mappings, depName); logger.info("mapped dependency name {} to {} for license/notice check", depName, dependencyName); checkFile(dependencyName, jarName, licenses, "LICENSE"); @@ -286,24 +260,6 @@ private void checkDependencies( } } - private void validateSha(Set shaFiles, File dependency, String jarName, String depName) throws NoSuchAlgorithmException, - IOException { - if (ignoreShas.contains(depName)) { - // local deps should not have sha files! - if (getShaFile(jarName).exists()) { - throw new GradleException("SHA file " + getShaFile(jarName) + " exists for ignored dependency " + depName); - } - } else { - logger.info("Checking sha for {}", jarName); - checkSha(dependency, jarName, shaFiles); - } - } - - private String joinFilenames(Set shaFiles) { - List names = shaFiles.stream().map(File::getName).collect(Collectors.toList()); - return String.join("\n", names); - } - public static String getDependencyName(Map mappings, String dependencyName) { // order is the same for keys and values iteration since we use a linked hashmap List mapped = new ArrayList<>(mappings.values()); @@ -319,30 +275,6 @@ public static String getDependencyName(Map mappings, String depe return dependencyName; } - private void checkSha(File jar, String jarName, Set shaFiles) throws NoSuchAlgorithmException, IOException { - File shaFile = getShaFile(jarName); - if (shaFile.exists() == false) { - throw new GradleException("Missing SHA for " + jarName + ". Run \"gradle updateSHAs\" to create them"); - } - - // TODO: shouldn't have to trim, sha files should not have trailing newline - byte[] fileBytes = Files.readAllBytes(shaFile.toPath()); - String expectedSha = new String(fileBytes, StandardCharsets.UTF_8).trim(); - - String sha = getSha1(jar); - - if (expectedSha.equals(sha) == false) { - final String exceptionMessage = String.format(Locale.ROOT, """ - SHA has changed! Expected %s for %s but got %s. - This usually indicates a corrupt dependency cache or artifacts changed upstream. 
- Either wipe your cache, fix the upstream artifact, or delete %s and run updateShas - """, expectedSha, jarName, sha, shaFile); - - throw new GradleException(exceptionMessage); - } - shaFiles.remove(shaFile); - } - private void checkFile(String name, String jarName, Map counters, String type) { String fileName = getFileName(name, counters, type); @@ -375,27 +307,4 @@ public LinkedHashMap getMappings() { return new LinkedHashMap<>(mappings); } - File getShaFile(String jarName) { - return new File(licensesDir.get().getAsFile(), jarName + SHA_EXTENSION); - } - - @Internal - Set getShaFiles() { - File licenseDirAsFile = licensesDir.get().getAsFile(); - File[] array = licenseDirAsFile.listFiles(); - if (array == null) { - throw new GradleException("\"" + licenseDirAsFile.getPath() + "\" isn't a valid directory"); - } - - return Arrays.stream(array).filter(file -> file.getName().endsWith(SHA_EXTENSION)).collect(Collectors.toSet()); - } - - String getSha1(File file) throws IOException, NoSuchAlgorithmException { - byte[] bytes = Files.readAllBytes(file.toPath()); - - MessageDigest digest = MessageDigest.getInstance("SHA-1"); - char[] encoded = Hex.encodeHex(digest.digest(bytes)); - return String.copyValueOf(encoded); - } - } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTask.java deleted file mode 100644 index e3140a9d71b6b..0000000000000 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTask.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.internal.precommit; - -import org.gradle.api.DefaultTask; -import org.gradle.api.logging.Logger; -import org.gradle.api.logging.Logging; -import org.gradle.api.tasks.Internal; -import org.gradle.api.tasks.TaskAction; -import org.gradle.api.tasks.TaskProvider; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.StandardOpenOption; -import java.security.NoSuchAlgorithmException; -import java.util.Set; - -/** - * A task to update shas used by {@code DependencyLicensesCheck} - */ -public class UpdateShasTask extends DefaultTask { - - private final Logger logger = Logging.getLogger(getClass()); - - /** The parent dependency licenses task to use configuration from */ - private TaskProvider parentTask; - - public UpdateShasTask() { - setDescription("Updates the sha files for the dependencyLicenses check"); - setOnlyIf(element -> parentTask.get().getLicensesDir() != null); - } - - @TaskAction - public void updateShas() throws NoSuchAlgorithmException, IOException { - Set shaFiles = parentTask.get().getShaFiles(); - - for (File dependency : parentTask.get().getDependencies()) { - String jarName = dependency.getName(); - File shaFile = parentTask.get().getShaFile(jarName); - - if (shaFile.exists() == false) { - createSha(dependency, jarName, shaFile); - } else { - shaFiles.remove(shaFile); - } - } - - for (File shaFile : shaFiles) { - logger.lifecycle("Removing unused sha " + shaFile.getName()); - shaFile.delete(); - } - } - - private void createSha(File dependency, String jarName, File shaFile) throws IOException, NoSuchAlgorithmException { - logger.lifecycle("Adding sha for " + jarName); - - String sha = parentTask.get().getSha1(dependency); - - Files.write(shaFile.toPath(), sha.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE); - } - - @Internal - public DependencyLicensesTask getParentTask() { - return parentTask.get(); - } - - public void setParentTask(TaskProvider parentTask) { - this.parentTask = parentTask; - } -} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java index e6b1f5c90b72e..1a9284276043c 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java @@ -26,8 +26,6 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.security.NoSuchAlgorithmException; import java.util.HashMap; import java.util.Map; @@ -41,8 +39,6 @@ public class DependencyLicensesTaskTests { @Rule public ExpectedException expectedException = ExpectedException.none(); - private UpdateShasTask updateShas; - private TaskProvider task; private Project project; @@ -53,7 +49,6 @@ public class DependencyLicensesTaskTests { public void prepare() { project = createProject(); task = createDependencyLicensesTask(project); - updateShas = createUpdateShasTask(project, task); dependency = project.getDependencies().localGroovy(); task.configure(new Action() { @Override @@ -87,19 +82,6 @@ public void givenProjectWithoutLicensesDirNorDependenciesThenShouldReturnSilentl task.get().checkDependencies(); } - @Test - public void 
givenProjectWithDependencyButNoShaFileThenShouldReturnException() throws Exception { - expectedException.expect(GradleException.class); - expectedException.expectMessage(containsString("Missing SHA for ")); - - File licensesDir = getLicensesDir(project); - createFileIn(licensesDir, "groovy-all-LICENSE.txt", PERMISSIVE_LICENSE_TEXT); - createFileIn(licensesDir, "groovy-all-NOTICE.txt", ""); - - project.getDependencies().add("implementation", project.getDependencies().localGroovy()); - task.get().checkDependencies(); - } - @Test public void givenProjectWithDependencyButNoLicenseFileThenShouldReturnException() throws Exception { expectedException.expect(GradleException.class); @@ -108,7 +90,6 @@ public void givenProjectWithDependencyButNoLicenseFileThenShouldReturnException( project.getDependencies().add("implementation", project.getDependencies().localGroovy()); getLicensesDir(project).mkdir(); - updateShas.updateShas(); task.get().checkDependencies(); } @@ -121,7 +102,6 @@ public void givenProjectWithDependencyButNoNoticeFileThenShouldReturnException() createFileIn(getLicensesDir(project), "groovy-LICENSE.txt", PERMISSIVE_LICENSE_TEXT); - updateShas.updateShas(); task.get().checkDependencies(); } @@ -135,7 +115,6 @@ public void givenProjectWithStrictDependencyButNoSourcesFileThenShouldReturnExce createFileIn(getLicensesDir(project), "groovy-LICENSE.txt", STRICT_LICENSE_TEXT); createFileIn(getLicensesDir(project), "groovy-NOTICE.txt", ""); - updateShas.updateShas(); task.get().checkDependencies(); } @@ -147,7 +126,6 @@ public void givenProjectWithStrictDependencyAndEverythingInOrderThenShouldReturn createFileIn(getLicensesDir(project), "groovy-NOTICE.txt", ""); createFileIn(getLicensesDir(project), "groovy-SOURCES.txt", ""); - updateShas.updateShas(); task.get().checkDependencies(); } @@ -190,37 +168,6 @@ public void givenProjectWithANoticeButWithoutTheDependencyThenShouldThrowExcepti task.get().checkDependencies(); } - @Test - public void givenProjectWithAShaButWithoutTheDependencyThenShouldThrowException() throws Exception { - expectedException.expect(GradleException.class); - expectedException.expectMessage(containsString("Unused sha files found: \n")); - - project.getDependencies().add("implementation", dependency); - - File licensesDir = getLicensesDir(project); - createAllDefaultDependencyFiles(licensesDir, "groovy"); - createFileIn(licensesDir, "non-declared.sha1", ""); - - task.get().checkDependencies(); - } - - @Test - public void givenProjectWithADependencyWithWrongShaThenShouldThrowException() throws Exception { - expectedException.expect(GradleException.class); - expectedException.expectMessage(containsString("SHA has changed! 
Expected ")); - - project.getDependencies().add("implementation", dependency); - - File licensesDir = getLicensesDir(project); - createAllDefaultDependencyFiles(licensesDir, "groovy"); - - Path groovySha = Files.list(licensesDir.toPath()).filter(file -> file.toFile().getName().contains("sha")).findFirst().get(); - - Files.write(groovySha, new byte[] { 1 }, StandardOpenOption.CREATE); - - task.get().checkDependencies(); - } - @Test public void givenProjectWithADependencyMappingThenShouldReturnSilently() throws Exception { project.getDependencies().add("implementation", dependency); @@ -261,14 +208,6 @@ public void givenProjectWithAIgnoreShaConfigurationAndNoShaFileThenShouldReturnS task.get().checkDependencies(); } - @Test - public void givenProjectWithoutLicensesDirWhenAskingForShaFilesThenShouldThrowException() { - expectedException.expect(GradleException.class); - expectedException.expectMessage(containsString("isn't a valid directory")); - - task.get().getShaFiles(); - } - private Project createProject() { Project project = ProjectBuilder.builder().build(); project.getPlugins().apply(JavaPlugin.class); @@ -276,11 +215,9 @@ private Project createProject() { return project; } - private void createAllDefaultDependencyFiles(File licensesDir, String dependencyName) throws IOException, NoSuchAlgorithmException { + private void createAllDefaultDependencyFiles(File licensesDir, String dependencyName) throws IOException { createFileIn(licensesDir, dependencyName + "-LICENSE.txt", PERMISSIVE_LICENSE_TEXT); createFileIn(licensesDir, dependencyName + "-NOTICE.txt", ""); - - updateShas.updateShas(); } private File getLicensesDir(Project project) { @@ -300,13 +237,6 @@ private void createFileIn(File parent, String name, String content) throws IOExc Files.write(file, content.getBytes(StandardCharsets.UTF_8)); } - private UpdateShasTask createUpdateShasTask(Project project, TaskProvider dependencyLicensesTask) { - UpdateShasTask task = project.getTasks().register("updateShas", UpdateShasTask.class).get(); - - task.setParentTask(dependencyLicensesTask); - return task; - } - private TaskProvider createDependencyLicensesTask(Project project) { TaskProvider task = project.getTasks() .register("dependencyLicenses", DependencyLicensesTask.class, new Action() { diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java deleted file mode 100644 index 174c5d0312486..0000000000000 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.gradle.internal.precommit; - -import org.apache.commons.io.FileUtils; -import org.gradle.api.GradleException; -import org.gradle.api.Project; -import org.gradle.api.artifacts.Dependency; -import org.gradle.api.file.FileCollection; -import org.gradle.api.plugins.JavaPlugin; -import org.gradle.api.tasks.TaskProvider; -import org.gradle.testfixtures.ProjectBuilder; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.security.NoSuchAlgorithmException; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class UpdateShasTaskTests { - - public static final String GROOVY_JAR_REGEX = "groovy-\\d\\.\\d+\\.\\d+\\.jar"; - @Rule - public ExpectedException expectedException = ExpectedException.none(); - - private UpdateShasTask task; - - private Project project; - - private Dependency dependency; - - @Before - public void prepare() throws IOException { - project = createProject(); - task = createUpdateShasTask(project); - dependency = project.getDependencies().localGroovy(); - - } - - @Test - public void whenDependencyDoesntExistThenShouldDeleteDependencySha() throws IOException, NoSuchAlgorithmException { - File unusedSha = createFileIn(getLicensesDir(project), "test.sha1", ""); - task.updateShas(); - - assertFalse(unusedSha.exists()); - } - - @Test - public void whenDependencyExistsButShaNotThenShouldCreateNewShaFile() throws IOException, NoSuchAlgorithmException { - project.getDependencies().add("implementation", dependency); - - getLicensesDir(project).mkdir(); - task.updateShas(); - Path groovySha = Files.list(getLicensesDir(project).toPath()) - .filter(p -> p.toFile().getName().matches(GROOVY_JAR_REGEX + ".sha1")) - .findFirst() - .get(); - assertTrue(groovySha.toFile().getName().startsWith("groovy")); - } - - @Test - public void whenDependencyAndWrongShaExistsThenShouldNotOverwriteShaFile() throws IOException, NoSuchAlgorithmException { - project.getDependencies().add("implementation", dependency); - File groovyJar = task.getParentTask() - .getDependencies() - .getFiles() - .stream() - .filter(f -> f.getName().matches(GROOVY_JAR_REGEX)) - .findFirst() - .get(); - String groovyShaName = groovyJar.getName() + ".sha1"; - File groovySha = createFileIn(getLicensesDir(project), groovyShaName, "content"); - task.updateShas(); - assertThat(FileUtils.readFileToString(groovySha), equalTo("content")); - } - - @Test - public void whenLicensesDirDoesntExistThenShouldThrowException() throws IOException, NoSuchAlgorithmException { - expectedException.expect(GradleException.class); - expectedException.expectMessage(containsString("isn't a valid directory")); - - task.updateShas(); - } - - private Project createProject() { - Project project = ProjectBuilder.builder().build(); - project.getPlugins().apply(JavaPlugin.class); - - return project; - } - - private File getLicensesDir(Project project) { - return getFile(project, "licenses"); - } - - private File getFile(Project project, String fileName) { - return project.getProjectDir().toPath().resolve(fileName).toFile(); - } - - private File createFileIn(File parent, String name, String content) throws 
IOException { - parent.mkdir(); - - Path path = parent.toPath().resolve(name); - File file = path.toFile(); - - Files.write(path, content.getBytes(), StandardOpenOption.CREATE); - - return file; - } - - private UpdateShasTask createUpdateShasTask(Project project) { - UpdateShasTask task = project.getTasks().register("updateShas", UpdateShasTask.class).get(); - - task.setParentTask(createDependencyLicensesTask(project)); - return task; - } - - private TaskProvider createDependencyLicensesTask(Project project) { - return project.getTasks() - .register( - "dependencyLicenses", - DependencyLicensesTask.class, - dependencyLicensesTask -> dependencyLicensesTask.setDependencies(getDependencies(project)) - ); - } - - private FileCollection getDependencies(Project project) { - return project.getConfigurations().getByName("compileClasspath"); - } -} diff --git a/client/rest/licenses/commons-codec-1.14.jar.sha1 b/client/rest/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/client/rest/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/client/rest/licenses/commons-logging-1.1.3.jar.sha1 b/client/rest/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/client/rest/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 deleted file mode 100644 index 366a9e31069a6..0000000000000 --- a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/client/rest/licenses/httpclient-4.5.13.jar.sha1 b/client/rest/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/rest/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.13.jar.sha1 b/client/rest/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/client/rest/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.13.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.13.jar.sha1 deleted file mode 100644 index 7629b7d5584c8..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f897ace4d7f10f0ea6a58f524a3b105dd483653 \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.14.jar.sha1 b/client/sniffer/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/client/sniffer/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 b/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git 
a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.13.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.13.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/distribution/tools/ansi-console/licenses/jansi-2.4.0.jar.sha1 b/distribution/tools/ansi-console/licenses/jansi-2.4.0.jar.sha1 deleted file mode 100644 index 37ca74b255dcf..0000000000000 --- a/distribution/tools/ansi-console/licenses/jansi-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -321c614f85f1dea6bb08c1817c60d53b7f3552fd \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.jar.sha1 deleted file mode 100644 index 425b11ee6c13f..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4fb5db5f03d00f6a94e43b78d097978190e4abb2 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.4.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.4.jar.sha1 deleted file mode 100644 index 7aec78e9e6f07..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a838a87959d9c2cee658f4a4e1869e28f6b9976 \ No newline at end of file diff --git a/gradle.properties b/gradle.properties index 15e46b620babb..22d542454d68f 100644 --- a/gradle.properties +++ b/gradle.properties @@ -17,3 +17,6 @@ systemProp.jdk.tls.client.protocols=TLSv1.2 # java homes resolved by environment variables org.gradle.java.installations.auto-detect=false org.gradle.java.installations.fromEnv=JAVA_TOOLCHAIN_HOME,JAVA_HOME,RUNTIME_JAVA_HOME,JAVA19_HOME,JAVA18_HOME,JAVA17_HOME,JAVA16_HOME,JAVA15_HOME,JAVA14_HOME,JAVA13_HOME,JAVA12_HOME,JAVA11_HOME,JAVA8_HOME + +# log some dependency verification info to console +org.gradle.dependency.verification.console=verbose \ No newline at end of file diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index ce56380f18e50..a80a007db9901 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -25,10 +25,10 @@ json-assert = "org.skyscreamer:jsonassert:1.5.0" jackson-dataformat-yaml = { group = "com.fasterxml.jackson.dataformat", name="jackson-dataformat-yaml", version.ref="jackson" } jackson-platform = { group = "com.fasterxml.jackson", name="jackson-bom", version.ref="jackson" } jna = "net.java.dev.jna:jna:5.10.0" -junit = "junit:junit:4.12" +junit = "junit:junit:4.13.2" junit5-platform = { group = "org.junit", name="junit-bom", version.ref="junit5" } junit5-jupiter = { group = "org.junit.jupiter", name="junit-jupiter", version.ref="junit5" } -junit5-platform-launcher = "org.junit.platform:junit-platform-launcher:1.8.0" 
+junit5-platform-launcher = "org.junit.platform:junit-platform-launcher:1.8.1" junit5-vintage = { group = "org.junit.vintage", name="junit-vintage-engine", version.ref="junit5" } maven-model = "org.apache.maven:maven-model:3.6.2" mockito-core = "org.mockito:mockito-core:1.9.5" diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml new file mode 100644 index 0000000000000..11942b0e49d5a --- /dev/null +++ b/gradle/verification-metadata.xml @@ -0,0 +1,3600 @@ + + + + false + false + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 b/libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 deleted file mode 100644 index b50ed4fea3bd1..0000000000000 --- a/libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98cafc6081d5632b61be2c9e60650b64ddbc637c \ No newline at end of file diff --git a/libs/grok/licenses/jcodings-1.0.44.jar.sha1 b/libs/grok/licenses/jcodings-1.0.44.jar.sha1 deleted file mode 100644 index 4449009d3395e..0000000000000 --- a/libs/grok/licenses/jcodings-1.0.44.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6884b2fd8fd9a56874db05afaa22435043a2e3e \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.29.jar.sha1 b/libs/grok/licenses/joni-2.1.29.jar.sha1 deleted file mode 100644 index 251ff2ec05a19..0000000000000 --- a/libs/grok/licenses/joni-2.1.29.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb751702e1194ff24259155db4d37e9383d4320 \ No newline at end of file diff --git a/libs/lz4/licenses/lz4-java-1.8.0.jar.sha1 b/libs/lz4/licenses/lz4-java-1.8.0.jar.sha1 deleted file mode 100644 index 5e3536d1b7d29..0000000000000 --- a/libs/lz4/licenses/lz4-java-1.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b986a99445e49ea5fbf5d149c4b63f6ed6c6780 \ No newline at end of file diff --git a/libs/x-content/impl/licenses/jackson-core-2.13.2.jar.sha1 b/libs/x-content/impl/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/libs/x-content/impl/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/libs/x-content/impl/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 b/libs/x-content/impl/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 deleted file mode 100644 index 3a4f0e1b17565..0000000000000 --- a/libs/x-content/impl/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4fc77e1ec6922fc48bf1181e4b38f600dac222ff \ No newline at end of file diff --git a/libs/x-content/impl/licenses/jackson-dataformat-smile-2.13.2.jar.sha1 b/libs/x-content/impl/licenses/jackson-dataformat-smile-2.13.2.jar.sha1 deleted file mode 100644 index 86a53f72de66e..0000000000000 --- a/libs/x-content/impl/licenses/jackson-dataformat-smile-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -984bb22f310ebbedc967d206e672f8acf766a98e \ No newline at end of file diff --git a/libs/x-content/impl/licenses/jackson-dataformat-yaml-2.13.2.jar.sha1 b/libs/x-content/impl/licenses/jackson-dataformat-yaml-2.13.2.jar.sha1 deleted file mode 100644 index 1cba175acf2ae..0000000000000 --- a/libs/x-content/impl/licenses/jackson-dataformat-yaml-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5601496b5b6e43d947aeeffbffadb2b18961c731 \ No newline at end of file diff --git a/libs/x-content/impl/licenses/snakeyaml-1.30.jar.sha1 b/libs/x-content/impl/licenses/snakeyaml-1.30.jar.sha1 deleted file mode 100644 index 02efe0ab45c0a..0000000000000 --- 
a/libs/x-content/impl/licenses/snakeyaml-1.30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8fde7fe2586328ac3c68db92045e1c8759125000 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 b/modules/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 deleted file mode 100644 index 5f1d015b87ac7..0000000000000 --- a/modules/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8467c813d442837fcaeddbc42cf5c5359fab4933 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/apache-mime4j-core-0.8.5.jar.sha1 b/modules/ingest-attachment/licenses/apache-mime4j-core-0.8.5.jar.sha1 deleted file mode 100644 index f73bbd03803c3..0000000000000 --- a/modules/ingest-attachment/licenses/apache-mime4j-core-0.8.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fc7258f948358c8caace27b9b191437a50a7ecc \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/apache-mime4j-dom-0.8.5.jar.sha1 b/modules/ingest-attachment/licenses/apache-mime4j-dom-0.8.5.jar.sha1 deleted file mode 100644 index 1625e7d33617e..0000000000000 --- a/modules/ingest-attachment/licenses/apache-mime4j-dom-0.8.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6808f50c447fb033b334ca5ca25830647d85abe1 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/commons-codec-1.14.jar.sha1 b/modules/ingest-attachment/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/modules/ingest-attachment/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 b/modules/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 deleted file mode 100644 index f054416580624..0000000000000 --- a/modules/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4cf4688fe1c7e3a63aa636cc96d013af537768e \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/commons-compress-1.21.jar.sha1 b/modules/ingest-attachment/licenses/commons-compress-1.21.jar.sha1 deleted file mode 100644 index 81ac609a1aa26..0000000000000 --- a/modules/ingest-attachment/licenses/commons-compress-1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ec95b60d4e86b5c95a0e919cb172a0af98011ef \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/commons-io-2.11.0.jar.sha1 b/modules/ingest-attachment/licenses/commons-io-2.11.0.jar.sha1 deleted file mode 100644 index 8adec30bade49..0000000000000 --- a/modules/ingest-attachment/licenses/commons-io-2.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2503f302b11ebde7ebc3df41daebe0e4eea3689 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/commons-lang3-3.9.jar.sha1 b/modules/ingest-attachment/licenses/commons-lang3-3.9.jar.sha1 deleted file mode 100644 index 2adcfd377f87c..0000000000000 --- a/modules/ingest-attachment/licenses/commons-lang3-3.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0122c7cee69b53ed4a7681c03d4ee4c0e2765da5 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 b/modules/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/modules/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git 
a/modules/ingest-attachment/licenses/commons-math3-3.6.1.jar.sha1 b/modules/ingest-attachment/licenses/commons-math3-3.6.1.jar.sha1 deleted file mode 100644 index 72975be4c8851..0000000000000 --- a/modules/ingest-attachment/licenses/commons-math3-3.6.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4ba98f1d4b3c80ec46392f25e094a6a2e58fcbf \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/fontbox-2.0.26.jar.sha1 b/modules/ingest-attachment/licenses/fontbox-2.0.26.jar.sha1 deleted file mode 100644 index 88d77440cf5a5..0000000000000 --- a/modules/ingest-attachment/licenses/fontbox-2.0.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e416198adde54b753e41d3312f799640dac5687 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/jempbox-1.8.16.jar.sha1 b/modules/ingest-attachment/licenses/jempbox-1.8.16.jar.sha1 deleted file mode 100644 index aba5a49037c48..0000000000000 --- a/modules/ingest-attachment/licenses/jempbox-1.8.16.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f41de81768ef84ca2d8cda4cb79e9272c8ee966 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/juniversalchardet-1.0.3.jar.sha1 b/modules/ingest-attachment/licenses/juniversalchardet-1.0.3.jar.sha1 deleted file mode 100644 index 6b06952678fb3..0000000000000 --- a/modules/ingest-attachment/licenses/juniversalchardet-1.0.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd49678784c46aa8789c060538e0154013bb421b diff --git a/modules/ingest-attachment/licenses/pdfbox-2.0.26.jar.sha1 b/modules/ingest-attachment/licenses/pdfbox-2.0.26.jar.sha1 deleted file mode 100644 index 2c0c3a28ceba6..0000000000000 --- a/modules/ingest-attachment/licenses/pdfbox-2.0.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -67b85a6aea4a1c846448e3513161d6c260d6e0d9 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/poi-5.2.2.jar.sha1 b/modules/ingest-attachment/licenses/poi-5.2.2.jar.sha1 deleted file mode 100644 index d9f58e72c9200..0000000000000 --- a/modules/ingest-attachment/licenses/poi-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5513d31545085c33809c4b6553c2009fd19a6016 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 b/modules/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 deleted file mode 100644 index 7b3abffc1abd5..0000000000000 --- a/modules/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a201b5bdc92c0fae4bed4b8e5546388c4c2f9eb0 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 b/modules/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 deleted file mode 100644 index f5137b1e5223e..0000000000000 --- a/modules/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5df31b69375131fc2163a5557093cb112be90ce1 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 b/modules/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 deleted file mode 100644 index 568dde5125c3f..0000000000000 --- a/modules/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c5cd5f1b3e7b3656ab983b73bbbf8bf5f14f793 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 b/modules/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/modules/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 b/modules/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 deleted file mode 100644 index 5d227b11a0fa6..0000000000000 --- a/modules/ingest-attachment/licenses/tagsoup-1.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5584627487e984c03456266d3f8802eb85a9ce97 diff --git a/modules/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 deleted file mode 100644 index 373b7ec63138a..0000000000000 --- a/modules/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97b2454943127857a8304319be658d6d7ff4fff1 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-langdetect-tika-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-langdetect-tika-2.4.0.jar.sha1 deleted file mode 100644 index 4b530315d5012..0000000000000 --- a/modules/ingest-attachment/licenses/tika-langdetect-tika-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f36cad41d61ad3a49e61ca6a2327cf364ec110e1 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-apple-module-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-apple-module-2.4.0.jar.sha1 deleted file mode 100644 index ff5cc8a2b2cb3..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-apple-module-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6d3fcb9c539fde6ba7f175a82fa14466e69ba7a2 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-html-module-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-html-module-2.4.0.jar.sha1 deleted file mode 100644 index fe7c5256f8f38..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-html-module-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02a220afa62cc703233fe9b1787128e4391f59c5 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-microsoft-module-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-microsoft-module-2.4.0.jar.sha1 deleted file mode 100644 index f1585c2706fcc..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-microsoft-module-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -433b17482c209554449abc5503e65b9e1feeefbc \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-miscoffice-module-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-miscoffice-module-2.4.0.jar.sha1 deleted file mode 100644 index 361a8836c07d5..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-miscoffice-module-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ada9deb2ef1cd17c1b4313147dac5aa4b965e6d \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-pdf-module-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-pdf-module-2.4.0.jar.sha1 deleted file mode 100644 index bda4a8f13c2a6..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-pdf-module-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fda489271d30f6f0e3f7f92908d029f1b76c5e2 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-text-module-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-text-module-2.4.0.jar.sha1 deleted file mode 100644 index 029270ce25a10..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-text-module-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-6b18b232d4ae95ab67839f46f3e8413e7cc12eab \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-xml-module-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-xml-module-2.4.0.jar.sha1 deleted file mode 100644 index 205887a0a9b59..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-xml-module-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -181647dc6b748be73410f8a624a8b279f2e75407 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-xmp-commons-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-xmp-commons-2.4.0.jar.sha1 deleted file mode 100644 index 556c696d5a8ae..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-xmp-commons-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6fe6806d2604441f770ad775f6cba2fc6e1032b9 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/tika-parser-zip-commons-2.4.0.jar.sha1 b/modules/ingest-attachment/licenses/tika-parser-zip-commons-2.4.0.jar.sha1 deleted file mode 100644 index c7f936e21357e..0000000000000 --- a/modules/ingest-attachment/licenses/tika-parser-zip-commons-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a35f88b193c8e43bdf909e022a0325b306a43c87 \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 b/modules/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 deleted file mode 100644 index 7451ee17640d6..0000000000000 --- a/modules/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1ef1382ae9dfb2438b82b6dd575566355c2f30f \ No newline at end of file diff --git a/modules/ingest-attachment/licenses/xz-1.8.jar.sha1 b/modules/ingest-attachment/licenses/xz-1.8.jar.sha1 deleted file mode 100644 index 7455feac7983b..0000000000000 --- a/modules/ingest-attachment/licenses/xz-1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4f7d054303948eb6a4066194253886c8af07128 \ No newline at end of file diff --git a/modules/ingest-common/licenses/httpclient-4.5.13.jar.sha1 b/modules/ingest-common/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/modules/ingest-common/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/modules/ingest-common/licenses/httpcore-4.4.13.jar.sha1 b/modules/ingest-common/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/modules/ingest-common/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-3.0.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-3.0.0.jar.sha1 deleted file mode 100644 index 7c4dbcb2718ab..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-3.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -523b0d63f3dbeacb7dfceb7431cb17fa56cf79fa \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.13.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.13.1.jar.sha1 deleted file mode 100644 index 4a19d003fdc41..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.13.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1cbcbe4623113e6af92ccaa89884a345270f1a87 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-core-2.13.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-core-2.13.1.jar.sha1 deleted file mode 100644 index 
16721b87d5a88..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-core-2.13.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51ae921a2ed1e06ca8876f12f32f265e83c0b2b8 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.1.jar.sha1 deleted file mode 100644 index 1939eea75f4ef..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.13.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -698b2d2b15d9a1b7aae025f1d9f576842285e7f6 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 deleted file mode 100644 index 32c18f89c6a29..0000000000000 --- a/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7e0fd82da0a160b7928ba214e699a7e6a74fff4 \ No newline at end of file diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.5.1-1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.5.1-1.jar.sha1 deleted file mode 100644 index f15e50069ba63..0000000000000 --- a/modules/lang-expression/licenses/antlr4-runtime-4.5.1-1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66144204f9d6d7d3f3f775622c2dd7e9bd511d97 diff --git a/modules/lang-expression/licenses/asm-7.2.jar.sha1 b/modules/lang-expression/licenses/asm-7.2.jar.sha1 deleted file mode 100644 index acb97fc1a0249..0000000000000 --- a/modules/lang-expression/licenses/asm-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fa637eb67eb7628c915d73762b681ae7ff0b9731 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-analysis-7.2.jar.sha1 b/modules/lang-expression/licenses/asm-analysis-7.2.jar.sha1 deleted file mode 100644 index 849b5e0bfa671..0000000000000 --- a/modules/lang-expression/licenses/asm-analysis-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6e6abe057f23630113f4167c34bda7086691258 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-7.2.jar.sha1 b/modules/lang-expression/licenses/asm-commons-7.2.jar.sha1 deleted file mode 100644 index b634981fc89ac..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ca2954e8d92a05bacc28ff465b25c70e0f512497 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-7.2.jar.sha1 b/modules/lang-expression/licenses/asm-tree-7.2.jar.sha1 deleted file mode 100644 index 986a1c55f5e8f..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a23cc36edaf8fc5a89cb100182758ccb5991487 \ No newline at end of file diff --git a/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 deleted file mode 100644 index 6336318c2ce1a..0000000000000 --- a/modules/lang-mustache/licenses/compiler-0.9.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6111ae24e3be9ecbd75f5fe908583fc14b4f0174 \ No newline at end of file diff --git a/modules/lang-painless/licenses/antlr4-runtime-4.5.3.jar.sha1 b/modules/lang-painless/licenses/antlr4-runtime-4.5.3.jar.sha1 deleted file mode 100644 index 535955b7d6826..0000000000000 --- a/modules/lang-painless/licenses/antlr4-runtime-4.5.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2609e36f18f7e8d593cc1cddfb2ac776dc96b8e0 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-7.2.jar.sha1 deleted file mode 100644 index acb97fc1a0249..0000000000000 --- 
a/modules/lang-painless/licenses/asm-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fa637eb67eb7628c915d73762b681ae7ff0b9731 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 deleted file mode 100644 index 849b5e0bfa671..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6e6abe057f23630113f4167c34bda7086691258 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-commons-7.2.jar.sha1 deleted file mode 100644 index b634981fc89ac..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ca2954e8d92a05bacc28ff465b25c70e0f512497 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 deleted file mode 100644 index 986a1c55f5e8f..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a23cc36edaf8fc5a89cb100182758ccb5991487 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-util-7.2.jar.sha1 deleted file mode 100644 index 6f70a0eea65ab..0000000000000 --- a/modules/lang-painless/licenses/asm-util-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3ae34e57fa8a4040e28247291d0cc3d6b8c7bcf \ No newline at end of file diff --git a/modules/legacy-geo/licenses/jackson-core-2.13.2.jar.sha1 b/modules/legacy-geo/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/modules/legacy-geo/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/modules/legacy-geo/licenses/jts-core-1.15.0.jar.sha1 b/modules/legacy-geo/licenses/jts-core-1.15.0.jar.sha1 deleted file mode 100644 index 32e262511c0ef..0000000000000 --- a/modules/legacy-geo/licenses/jts-core-1.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -705981b7e25d05a76a3654e597dab6ba423eb79e \ No newline at end of file diff --git a/modules/legacy-geo/licenses/s2-geometry-library-java-1.0.1.jar.sha1 b/modules/legacy-geo/licenses/s2-geometry-library-java-1.0.1.jar.sha1 deleted file mode 100644 index 67b2eb2ab7a7c..0000000000000 --- a/modules/legacy-geo/licenses/s2-geometry-library-java-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d3b2d97dd176bd705e4968a88fba0ea30fe991 \ No newline at end of file diff --git a/modules/legacy-geo/licenses/spatial4j-0.7.jar.sha1 b/modules/legacy-geo/licenses/spatial4j-0.7.jar.sha1 deleted file mode 100644 index 2244eb6800408..0000000000000 --- a/modules/legacy-geo/licenses/spatial4j-0.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -faa8ba85d503da4ab872d17ba8c00da0098ab2f2 \ No newline at end of file diff --git a/modules/repository-azure/licenses/azure-core-1.27.0.jar.sha1 b/modules/repository-azure/licenses/azure-core-1.27.0.jar.sha1 deleted file mode 100644 index 9206b697ca648..0000000000000 --- a/modules/repository-azure/licenses/azure-core-1.27.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -75a2db538d218e2bd3c2cbdf04c955b8f6db6626 \ No newline at end of file diff --git a/modules/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 b/modules/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 deleted file mode 100644 index 936a02dfba4d7..0000000000000 --- 
a/modules/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d1f34b3e60db038f3913007a2706a820383dc26 \ No newline at end of file diff --git a/modules/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 b/modules/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 deleted file mode 100644 index 349d190bbbac3..0000000000000 --- a/modules/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74b92065815f7affb0cd7897b683369b9ed982fd \ No newline at end of file diff --git a/modules/repository-azure/licenses/azure-storage-common-12.15.1.jar.sha1 b/modules/repository-azure/licenses/azure-storage-common-12.15.1.jar.sha1 deleted file mode 100644 index 84946ab301b20..0000000000000 --- a/modules/repository-azure/licenses/azure-storage-common-12.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -34f9c0563714e666ee6f44430152f46a5f760932 \ No newline at end of file diff --git a/modules/repository-azure/licenses/jackson-annotations-2.13.2.jar.sha1 b/modules/repository-azure/licenses/jackson-annotations-2.13.2.jar.sha1 deleted file mode 100644 index ecd3fb49d5b12..0000000000000 --- a/modules/repository-azure/licenses/jackson-annotations-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec18851f1976d5b810ae1a5fcc32520d2d38f77a \ No newline at end of file diff --git a/modules/repository-azure/licenses/jackson-core-2.13.2.jar.sha1 b/modules/repository-azure/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/modules/repository-azure/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/modules/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 b/modules/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 deleted file mode 100644 index 9d9266300feef..0000000000000 --- a/modules/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ffeb635597d093509f33e1e94274d14be610f933 \ No newline at end of file diff --git a/modules/repository-azure/licenses/jackson-dataformat-xml-2.13.2.jar.sha1 b/modules/repository-azure/licenses/jackson-dataformat-xml-2.13.2.jar.sha1 deleted file mode 100644 index 7d020f81a91ba..0000000000000 --- a/modules/repository-azure/licenses/jackson-dataformat-xml-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb6a722f128ff0ce2494384d419b6ff20fad25ab \ No newline at end of file diff --git a/modules/repository-azure/licenses/jackson-datatype-jsr310-2.13.2.jar.sha1 b/modules/repository-azure/licenses/jackson-datatype-jsr310-2.13.2.jar.sha1 deleted file mode 100644 index 979d38bb38784..0000000000000 --- a/modules/repository-azure/licenses/jackson-datatype-jsr310-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cddd9380efd4b81ea01e98be8fbdc9765a81793b \ No newline at end of file diff --git a/modules/repository-azure/licenses/jackson-module-jaxb-annotations-2.13.2.jar.sha1 b/modules/repository-azure/licenses/jackson-module-jaxb-annotations-2.13.2.jar.sha1 deleted file mode 100644 index c71c4fe5ee90c..0000000000000 --- a/modules/repository-azure/licenses/jackson-module-jaxb-annotations-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e2f198c512f0f0ccbd6d618baecc9dde9975eadf \ No newline at end of file diff --git a/modules/repository-azure/licenses/jakarta.activation-api-1.2.1.jar.sha1 b/modules/repository-azure/licenses/jakarta.activation-api-1.2.1.jar.sha1 deleted file mode 100644 index de507235999c0..0000000000000 --- 
a/modules/repository-azure/licenses/jakarta.activation-api-1.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -562a587face36ec7eff2db7f2fc95425c6602bc1 \ No newline at end of file diff --git a/modules/repository-azure/licenses/jakarta.xml.bind-api-2.3.2.jar.sha1 b/modules/repository-azure/licenses/jakarta.xml.bind-api-2.3.2.jar.sha1 deleted file mode 100644 index c66f654e9b56c..0000000000000 --- a/modules/repository-azure/licenses/jakarta.xml.bind-api-2.3.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d49996a4338670764d7ca4b85a1c4ccf7fe665d \ No newline at end of file diff --git a/modules/repository-azure/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 b/modules/repository-azure/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 deleted file mode 100644 index f47bae03e8ea2..0000000000000 --- a/modules/repository-azure/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0ea6ef49f1349bb30e8c6e8a7052d0f3ee7a719 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-buffer-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-buffer-4.1.77.Final.jar.sha1 deleted file mode 100644 index c3ead4fa2346c..0000000000000 --- a/modules/repository-azure/licenses/netty-buffer-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d97571f99e5e739d86824d0df99f35d295276b5f \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-codec-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-codec-4.1.77.Final.jar.sha1 deleted file mode 100644 index 9bf5943c8f935..0000000000000 --- a/modules/repository-azure/licenses/netty-codec-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4efc5f59335301d6ba0d7cd31dd10651119b03c8 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-codec-dns-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-codec-dns-4.1.77.Final.jar.sha1 deleted file mode 100644 index 6ef28c444ce29..0000000000000 --- a/modules/repository-azure/licenses/netty-codec-dns-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a0a9bc85703efbab626fb8642e08e221b59dc604 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-codec-http-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-codec-http-4.1.77.Final.jar.sha1 deleted file mode 100644 index ba358e7de3ee1..0000000000000 --- a/modules/repository-azure/licenses/netty-codec-http-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5ac5afa9af5b4dc0e8bdbfd686979af77ebdb3c \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-codec-http2-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-codec-http2-4.1.77.Final.jar.sha1 deleted file mode 100644 index 16afbe488f68d..0000000000000 --- a/modules/repository-azure/licenses/netty-codec-http2-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e58eeeacc74f8ad2b2acb240b1f01d2c40159d7 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-codec-socks-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-codec-socks-4.1.77.Final.jar.sha1 deleted file mode 100644 index 6b621a745c62a..0000000000000 --- a/modules/repository-azure/licenses/netty-codec-socks-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17bb510aa545fc73a18ab804c594593e32de1a1d \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-common-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-common-4.1.77.Final.jar.sha1 deleted file mode 100644 index c8a4b9043b3e6..0000000000000 --- 
a/modules/repository-azure/licenses/netty-common-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea0fc20f4e6178966b9d62017b7fcb83dfe0e713 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-handler-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-handler-4.1.77.Final.jar.sha1 deleted file mode 100644 index 0b16b55ff5480..0000000000000 --- a/modules/repository-azure/licenses/netty-handler-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47a81089de03635a27f509f3e4e13386ae1db275 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-handler-proxy-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-handler-proxy-4.1.77.Final.jar.sha1 deleted file mode 100644 index dfdbec1acaee5..0000000000000 --- a/modules/repository-azure/licenses/netty-handler-proxy-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d1ac0d95b770098c46b6679fbfd417ae277012d4 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-resolver-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-resolver-4.1.77.Final.jar.sha1 deleted file mode 100644 index c12c9ba9617b8..0000000000000 --- a/modules/repository-azure/licenses/netty-resolver-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4a239dbf8d8bb5f98aa51462c35011c0516395fd \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-resolver-dns-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-resolver-dns-4.1.77.Final.jar.sha1 deleted file mode 100644 index 858b004ae28df..0000000000000 --- a/modules/repository-azure/licenses/netty-resolver-dns-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aad506ab6804e2720771634e2de2a065fa678126 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-transport-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-transport-4.1.77.Final.jar.sha1 deleted file mode 100644 index f0593c4e6b579..0000000000000 --- a/modules/repository-azure/licenses/netty-transport-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a3373bbd20d520c821f210bd5ee886788512043 \ No newline at end of file diff --git a/modules/repository-azure/licenses/netty-transport-native-unix-common-4.1.77.Final.jar.sha1 b/modules/repository-azure/licenses/netty-transport-native-unix-common-4.1.77.Final.jar.sha1 deleted file mode 100644 index 6de047b851e38..0000000000000 --- a/modules/repository-azure/licenses/netty-transport-native-unix-common-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c95d53486414b3270d08057957c5da8e0c37e4eb \ No newline at end of file diff --git a/modules/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 b/modules/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 deleted file mode 100644 index 77210f7c7b402..0000000000000 --- a/modules/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d9fb7a7926ffa635b3dcaa5049fb2bfa25b3e7d0 \ No newline at end of file diff --git a/modules/repository-azure/licenses/reactor-core-3.4.14.jar.sha1 b/modules/repository-azure/licenses/reactor-core-3.4.14.jar.sha1 deleted file mode 100644 index a5b9783b20a63..0000000000000 --- a/modules/repository-azure/licenses/reactor-core-3.4.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78549cb21d2ca677807ac2863600bdaeb661601a \ No newline at end of file diff --git a/modules/repository-azure/licenses/reactor-netty-core-1.0.15.jar.sha1 b/modules/repository-azure/licenses/reactor-netty-core-1.0.15.jar.sha1 deleted file mode 100644 index 
9c6e3da050646..0000000000000 --- a/modules/repository-azure/licenses/reactor-netty-core-1.0.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -26b2e2f5cb3e2350ea67dbfd8e68a54756057c12 \ No newline at end of file diff --git a/modules/repository-azure/licenses/reactor-netty-http-1.0.15.jar.sha1 b/modules/repository-azure/licenses/reactor-netty-http-1.0.15.jar.sha1 deleted file mode 100644 index 62b6ed0042e2d..0000000000000 --- a/modules/repository-azure/licenses/reactor-netty-http-1.0.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c536498d90e139769651ab035686a411e5e3fad4 \ No newline at end of file diff --git a/modules/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 b/modules/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/modules/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/modules/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 b/modules/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/modules/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/modules/repository-azure/licenses/woodstox-core-6.2.7.jar.sha1 b/modules/repository-azure/licenses/woodstox-core-6.2.7.jar.sha1 deleted file mode 100644 index 1fe69a9cd8791..0000000000000 --- a/modules/repository-azure/licenses/woodstox-core-6.2.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -86622cfd0a9933628b6b876d0c92589148d3b42e \ No newline at end of file diff --git a/modules/repository-gcs/licenses/api-common-2.2.1.jar.sha1 b/modules/repository-gcs/licenses/api-common-2.2.1.jar.sha1 deleted file mode 100644 index 6e0d3a699465d..0000000000000 --- a/modules/repository-gcs/licenses/api-common-2.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9eb62c522f96befccfbd8c92bafc952eed4417a8 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/commons-codec-1.14.jar.sha1 b/modules/repository-gcs/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/modules/repository-gcs/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/modules/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 b/modules/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/modules/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/modules/repository-gcs/licenses/failureaccess-1.0.1.jar.sha1 b/modules/repository-gcs/licenses/failureaccess-1.0.1.jar.sha1 deleted file mode 100644 index 4798b37e20691..0000000000000 --- a/modules/repository-gcs/licenses/failureaccess-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/gax-2.0.0.jar.sha1 b/modules/repository-gcs/licenses/gax-2.0.0.jar.sha1 deleted file mode 100644 index 0e83a9eda350e..0000000000000 --- a/modules/repository-gcs/licenses/gax-2.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bd7a226230bd3f657eb76c5dbbe5a6110988b2d \ No newline at end of file diff --git a/modules/repository-gcs/licenses/gax-httpjson-0.85.0.jar.sha1 
b/modules/repository-gcs/licenses/gax-httpjson-0.85.0.jar.sha1 deleted file mode 100644 index 11b597bff12ff..0000000000000 --- a/modules/repository-gcs/licenses/gax-httpjson-0.85.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e830eaeb3897329c002bcb0791ce83ed5a2f674e \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-api-client-1.35.1.jar.sha1 b/modules/repository-gcs/licenses/google-api-client-1.35.1.jar.sha1 deleted file mode 100644 index 049786fdf4198..0000000000000 --- a/modules/repository-gcs/licenses/google-api-client-1.35.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df81628b335b7e554cdc50a7da6e554651a1ff2e \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-api-services-storage-v1-rev20210127-1.32.1.jar.sha1 b/modules/repository-gcs/licenses/google-api-services-storage-v1-rev20210127-1.32.1.jar.sha1 deleted file mode 100644 index cb402759639be..0000000000000 --- a/modules/repository-gcs/licenses/google-api-services-storage-v1-rev20210127-1.32.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7aa955565b2021860bae99c38f203cdac70531dd \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-auth-library-credentials-1.0.0.jar.sha1 b/modules/repository-gcs/licenses/google-auth-library-credentials-1.0.0.jar.sha1 deleted file mode 100644 index b424fe7a94f0e..0000000000000 --- a/modules/repository-gcs/licenses/google-auth-library-credentials-1.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8490d3c271942021b4ddb8642089a8372e9a7cc7 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-auth-library-oauth2-http-1.0.0.jar.sha1 b/modules/repository-gcs/licenses/google-auth-library-oauth2-http-1.0.0.jar.sha1 deleted file mode 100644 index 53d0e5d1a954c..0000000000000 --- a/modules/repository-gcs/licenses/google-auth-library-oauth2-http-1.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8710934be366e9ecf19ad3f9ca0582a309be0e9 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-cloud-core-2.0.2.jar.sha1 b/modules/repository-gcs/licenses/google-cloud-core-2.0.2.jar.sha1 deleted file mode 100644 index d78e92f5374e0..0000000000000 --- a/modules/repository-gcs/licenses/google-cloud-core-2.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f877327228fa58875541bc55bc2fc434c0c3f520 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-cloud-core-http-2.0.2.jar.sha1 b/modules/repository-gcs/licenses/google-cloud-core-http-2.0.2.jar.sha1 deleted file mode 100644 index 17f1f89e7bd4c..0000000000000 --- a/modules/repository-gcs/licenses/google-cloud-core-http-2.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b504ce662a6ff5a875492b49f770daaec64b4247 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-cloud-storage-1.118.1.jar.sha1 b/modules/repository-gcs/licenses/google-cloud-storage-1.118.1.jar.sha1 deleted file mode 100644 index f14f6c1aad3b6..0000000000000 --- a/modules/repository-gcs/licenses/google-cloud-storage-1.118.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e3dec53b3d7b2d560dc3f62dd3f711e0d7b4d44 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-http-client-1.39.2.jar.sha1 b/modules/repository-gcs/licenses/google-http-client-1.39.2.jar.sha1 deleted file mode 100644 index 4870e9606ee20..0000000000000 --- a/modules/repository-gcs/licenses/google-http-client-1.39.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5aafc3ff51693febf4214bb2a21baf577ce2fb25 \ No newline at end of file diff --git 
a/modules/repository-gcs/licenses/google-http-client-appengine-1.39.2.jar.sha1 b/modules/repository-gcs/licenses/google-http-client-appengine-1.39.2.jar.sha1 deleted file mode 100644 index 924db225f1ffa..0000000000000 --- a/modules/repository-gcs/licenses/google-http-client-appengine-1.39.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -22ba6d92fd2e5c0c9db01848941e2e8bd42943ca \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-http-client-gson-1.39.2.jar.sha1 b/modules/repository-gcs/licenses/google-http-client-gson-1.39.2.jar.sha1 deleted file mode 100644 index aec0283e3edd1..0000000000000 --- a/modules/repository-gcs/licenses/google-http-client-gson-1.39.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43c1d0500c31ee31ff5918ac4bbe95711cd744a9 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-http-client-jackson2-1.39.2.jar.sha1 b/modules/repository-gcs/licenses/google-http-client-jackson2-1.39.2.jar.sha1 deleted file mode 100644 index 170ec10eaf5d2..0000000000000 --- a/modules/repository-gcs/licenses/google-http-client-jackson2-1.39.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4037ca41fe43989a5609158d4ed7a3973de5df36 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 b/modules/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 deleted file mode 100644 index a8434bd380761..0000000000000 --- a/modules/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4a4f88c5e13143f882268c98239fb85c3b2c6cb2 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/grpc-context-1.39.0.jar.sha1 b/modules/repository-gcs/licenses/grpc-context-1.39.0.jar.sha1 deleted file mode 100644 index 8734ad2f10b57..0000000000000 --- a/modules/repository-gcs/licenses/grpc-context-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -637f453f3654aa29bf085ae7ddc86f9f80c937dd \ No newline at end of file diff --git a/modules/repository-gcs/licenses/gson-2.8.9.jar.sha1 b/modules/repository-gcs/licenses/gson-2.8.9.jar.sha1 deleted file mode 100644 index f7a8108d8c8e6..0000000000000 --- a/modules/repository-gcs/licenses/gson-2.8.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a432c1d6825781e21a02db2e2c33c5fde2833b9 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/guava-30.1.1-jre.jar.sha1 b/modules/repository-gcs/licenses/guava-30.1.1-jre.jar.sha1 deleted file mode 100644 index 39e641fc7834f..0000000000000 --- a/modules/repository-gcs/licenses/guava-30.1.1-jre.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87e0fd1df874ea3cbe577702fe6f17068b790fd8 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/jackson-core-2.13.2.jar.sha1 b/modules/repository-gcs/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/modules/repository-gcs/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/modules/repository-gcs/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/modules/repository-gcs/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/opencensus-api-0.28.0.jar.sha1 b/modules/repository-gcs/licenses/opencensus-api-0.28.0.jar.sha1 deleted file mode 100644 index 
e7e2d46fd074c..0000000000000 --- a/modules/repository-gcs/licenses/opencensus-api-0.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fc0d06a9d975a38c581dff59b99cf31db78bd99 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/opencensus-contrib-http-util-0.28.0.jar.sha1 b/modules/repository-gcs/licenses/opencensus-contrib-http-util-0.28.0.jar.sha1 deleted file mode 100644 index 164fa23ede758..0000000000000 --- a/modules/repository-gcs/licenses/opencensus-contrib-http-util-0.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6cb276330197d51dd65327fc305a3df7e622705 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/proto-google-common-protos-2.3.2.jar.sha1 b/modules/repository-gcs/licenses/proto-google-common-protos-2.3.2.jar.sha1 deleted file mode 100644 index 789e467a3f74d..0000000000000 --- a/modules/repository-gcs/licenses/proto-google-common-protos-2.3.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a35fd6ed973f752604fce97a21eb1e09d6afc467 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/proto-google-iam-v1-1.0.14.jar.sha1 b/modules/repository-gcs/licenses/proto-google-iam-v1-1.0.14.jar.sha1 deleted file mode 100644 index c74b581d09d1b..0000000000000 --- a/modules/repository-gcs/licenses/proto-google-iam-v1-1.0.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bc86a81d4bd99bfb54e9591b8de3ccd515fde78 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/protobuf-java-3.21.1.jar.sha1 b/modules/repository-gcs/licenses/protobuf-java-3.21.1.jar.sha1 deleted file mode 100644 index 2336816611bfe..0000000000000 --- a/modules/repository-gcs/licenses/protobuf-java-3.21.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2e396173a5b6ab549d790eba21c1d125bfe92912 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/protobuf-java-util-3.17.3.jar.sha1 b/modules/repository-gcs/licenses/protobuf-java-util-3.17.3.jar.sha1 deleted file mode 100644 index b130d7fb53c84..0000000000000 --- a/modules/repository-gcs/licenses/protobuf-java-util-3.17.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4340f06a346f46eab1b38feb066e4a2d30aed3b7 \ No newline at end of file diff --git a/modules/repository-gcs/licenses/threetenbp-1.5.1.jar.sha1 b/modules/repository-gcs/licenses/threetenbp-1.5.1.jar.sha1 deleted file mode 100644 index 5640b4c080ff3..0000000000000 --- a/modules/repository-gcs/licenses/threetenbp-1.5.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4307ad2fdd4ba8b5ecd3fdb88b932aa49fa25920 \ No newline at end of file diff --git a/modules/repository-s3/licenses/aws-java-sdk-core-1.11.749.jar.sha1 b/modules/repository-s3/licenses/aws-java-sdk-core-1.11.749.jar.sha1 deleted file mode 100644 index 7bc18d6d4f681..0000000000000 --- a/modules/repository-s3/licenses/aws-java-sdk-core-1.11.749.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1da5c1549295cfeebc67fc1c7539785a9441755b \ No newline at end of file diff --git a/modules/repository-s3/licenses/aws-java-sdk-s3-1.11.749.jar.sha1 b/modules/repository-s3/licenses/aws-java-sdk-s3-1.11.749.jar.sha1 deleted file mode 100644 index af794dc59dd7f..0000000000000 --- a/modules/repository-s3/licenses/aws-java-sdk-s3-1.11.749.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7d069f82723907ccdbd0c91ef0ac76046f5c9652 \ No newline at end of file diff --git a/modules/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 b/modules/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 deleted file mode 100644 index 29c9a93542058..0000000000000 --- a/modules/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-724bd22c0ff41c496469e18f9bea12bdfb2f7540 \ No newline at end of file diff --git a/modules/repository-s3/licenses/commons-codec-1.14.jar.sha1 b/modules/repository-s3/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/modules/repository-s3/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/modules/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 b/modules/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/modules/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git a/modules/repository-s3/licenses/httpclient-4.5.13.jar.sha1 b/modules/repository-s3/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/modules/repository-s3/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/modules/repository-s3/licenses/httpcore-4.4.13.jar.sha1 b/modules/repository-s3/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/modules/repository-s3/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/modules/repository-s3/licenses/jackson-annotations-2.13.2.jar.sha1 b/modules/repository-s3/licenses/jackson-annotations-2.13.2.jar.sha1 deleted file mode 100644 index ecd3fb49d5b12..0000000000000 --- a/modules/repository-s3/licenses/jackson-annotations-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec18851f1976d5b810ae1a5fcc32520d2d38f77a \ No newline at end of file diff --git a/modules/repository-s3/licenses/jackson-core-2.13.2.jar.sha1 b/modules/repository-s3/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/modules/repository-s3/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/modules/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 b/modules/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/modules/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/modules/repository-s3/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 b/modules/repository-s3/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 deleted file mode 100644 index 3a4f0e1b17565..0000000000000 --- a/modules/repository-s3/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4fc77e1ec6922fc48bf1181e4b38f600dac222ff \ No newline at end of file diff --git a/modules/repository-s3/licenses/jaxb-api-2.2.2.jar.sha1 b/modules/repository-s3/licenses/jaxb-api-2.2.2.jar.sha1 deleted file mode 100644 index a37e187238933..0000000000000 --- a/modules/repository-s3/licenses/jaxb-api-2.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeb3021ca93dde265796d82015beecdcff95bf09 \ No newline at end of file diff --git a/modules/repository-s3/licenses/jmespath-java-1.11.749.jar.sha1 b/modules/repository-s3/licenses/jmespath-java-1.11.749.jar.sha1 deleted file mode 100644 index 3467802d074c7..0000000000000 --- 
a/modules/repository-s3/licenses/jmespath-java-1.11.749.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -778866bc557dba508ee0eab2a0c5bfde468e49e6 \ No newline at end of file diff --git a/modules/repository-s3/licenses/joda-time-2.8.1.jar.sha1 b/modules/repository-s3/licenses/joda-time-2.8.1.jar.sha1 deleted file mode 100644 index 2a0f7df39a13c..0000000000000 --- a/modules/repository-s3/licenses/joda-time-2.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f5bfc718c95a7b1d3c371bb02a188a4df18361a9 \ No newline at end of file diff --git a/modules/repository-s3/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/modules/repository-s3/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/modules/repository-s3/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/modules/repository-url/licenses/commons-codec-1.14.jar.sha1 b/modules/repository-url/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/modules/repository-url/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/modules/repository-url/licenses/commons-logging-1.1.3.jar.sha1 b/modules/repository-url/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/modules/repository-url/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/modules/repository-url/licenses/httpclient-4.5.13.jar.sha1 b/modules/repository-url/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/modules/repository-url/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/modules/repository-url/licenses/httpcore-4.4.13.jar.sha1 b/modules/repository-url/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/modules/repository-url/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/modules/repository-url/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/modules/repository-url/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/modules/repository-url/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.77.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.77.Final.jar.sha1 deleted file mode 100644 index c3ead4fa2346c..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d97571f99e5e739d86824d0df99f35d295276b5f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.77.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.77.Final.jar.sha1 deleted file mode 100644 index 9bf5943c8f935..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4efc5f59335301d6ba0d7cd31dd10651119b03c8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.77.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-codec-http-4.1.77.Final.jar.sha1 deleted file mode 100644 index ba358e7de3ee1..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5ac5afa9af5b4dc0e8bdbfd686979af77ebdb3c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.77.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.77.Final.jar.sha1 deleted file mode 100644 index c8a4b9043b3e6..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea0fc20f4e6178966b9d62017b7fcb83dfe0e713 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.77.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.77.Final.jar.sha1 deleted file mode 100644 index 0b16b55ff5480..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47a81089de03635a27f509f3e4e13386ae1db275 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.77.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.77.Final.jar.sha1 deleted file mode 100644 index c12c9ba9617b8..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4a239dbf8d8bb5f98aa51462c35011c0516395fd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.77.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.77.Final.jar.sha1 deleted file mode 100644 index f0593c4e6b579..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.77.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a3373bbd20d520c821f210bd5ee886788512043 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/icu4j-68.2.jar.sha1 b/plugins/analysis-icu/licenses/icu4j-68.2.jar.sha1 deleted file mode 100644 index fcb3d79075099..0000000000000 --- a/plugins/analysis-icu/licenses/icu4j-68.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76893e6000401ace133a65262254be0ebe556d46 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.14.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/plugins/analysis-phonetic/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.1.jar.sha1 deleted file mode 100644 index 07d523ec0c82b..0000000000000 --- a/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87866deba6aa5d19956fbe3406d8ddb5f19f5352 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.1.jar.sha1 deleted file mode 100644 index 22af41d2b6b1b..0000000000000 --- a/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c169bab2e7dd04f5cb03d179a73a4339cc1d0a2 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 deleted file mode 100644 index 
446e7a91161a8..0000000000000 --- a/plugins/analysis-ukrainian/licenses/morfologik-ukrainian-search-3.7.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b8c8fbd740164d220ca7d18605b8b2092e163e9 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/azure-core-0.9.3.jar.sha1 b/plugins/discovery-azure-classic/licenses/azure-core-0.9.3.jar.sha1 deleted file mode 100644 index 5947972663e46..0000000000000 --- a/plugins/discovery-azure-classic/licenses/azure-core-0.9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7fe32241b738aad0f700f4277fa998230c144ae7 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 b/plugins/discovery-azure-classic/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 deleted file mode 100644 index d427170d5781b..0000000000000 --- a/plugins/discovery-azure-classic/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -602d3e6f5a9f058c2439e8fdf1270cddc811b440 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.14.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.4.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.4.jar.sha1 deleted file mode 100644 index 2f5b30d0edbbb..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-io-2.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1b6ea3b7e4aa4f492509a4952029cd8e48019ad \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-lang-2.6.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-lang-2.6.jar.sha1 deleted file mode 100644 index 4ee9249d2b76f..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-lang-2.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0ce1edb914c94ebc388f086c6827e8bdeec71ac2 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/javax.inject-1.jar.sha1 b/plugins/discovery-azure-classic/licenses/javax.inject-1.jar.sha1 deleted file mode 100644 index 7ef3c707b3c68..0000000000000 --- a/plugins/discovery-azure-classic/licenses/javax.inject-1.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -6975da39a7040257bd51d21a231b76c915872d38 diff --git a/plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 deleted file mode 100644 index a37e187238933..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeb3021ca93dde265796d82015beecdcff95bf09 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/jaxb-impl-2.2.3-1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jaxb-impl-2.2.3-1.jar.sha1 deleted file mode 100644 index 79fe55d773670..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jaxb-impl-2.2.3-1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -56baae106392040a45a06d4a41099173425da1e6 diff --git a/plugins/discovery-azure-classic/licenses/jersey-client-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/jersey-client-1.13.jar.sha1 deleted file mode 100644 index 6244c693f44fb..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jersey-client-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0ec38c57a78940bf5f8f5971307ca89406849647 diff --git a/plugins/discovery-azure-classic/licenses/jersey-core-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/jersey-core-1.13.jar.sha1 deleted file mode 100644 index ee2aa99db3798..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jersey-core-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4326a56dc6b2d67b7313905c353e1af225bb164f diff --git a/plugins/discovery-azure-classic/licenses/joda-time-2.10.10.jar.sha1 b/plugins/discovery-azure-classic/licenses/joda-time-2.10.10.jar.sha1 deleted file mode 100644 index 50a9ea517e7e5..0000000000000 --- a/plugins/discovery-azure-classic/licenses/joda-time-2.10.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29e8126e31f41e5c12b9fe3a7eb02e704c47d70b \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 b/plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 deleted file mode 100644 index b79503e0c69d9..0000000000000 --- a/plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85319c87280f30e1afc54c355f91f44741beac49 diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.749.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.749.jar.sha1 deleted file mode 100644 index 7bc18d6d4f681..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.749.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1da5c1549295cfeebc67fc1c7539785a9441755b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.749.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.749.jar.sha1 deleted file mode 100644 index c7c7220005fc3..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.749.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0865e0937c6500acf62ce9c8964eac76a8718f5f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.14.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 
9fe75b9a90da7..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.13.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/plugins/discovery-ec2/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.13.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.13.2.jar.sha1 deleted file mode 100644 index ecd3fb49d5b12..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec18851f1976d5b810ae1a5fcc32520d2d38f77a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-core-2.13.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 deleted file mode 100644 index 3a4f0e1b17565..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4fc77e1ec6922fc48bf1181e4b38f600dac222ff \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/joda-time-2.10.10.jar.sha1 b/plugins/discovery-ec2/licenses/joda-time-2.10.10.jar.sha1 deleted file mode 100644 index 50a9ea517e7e5..0000000000000 --- a/plugins/discovery-ec2/licenses/joda-time-2.10.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29e8126e31f41e5c12b9fe3a7eb02e704c47d70b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.14.jar.sha1 
b/plugins/discovery-gce/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/plugins/discovery-gce/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git a/plugins/discovery-gce/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/discovery-gce/licenses/failureaccess-1.0.1.jar.sha1 deleted file mode 100644 index 4798b37e20691..0000000000000 --- a/plugins/discovery-gce/licenses/failureaccess-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-client-1.33.1.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.33.1.jar.sha1 deleted file mode 100644 index 5e01249cf5477..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-client-1.33.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adb1d145f2ddda6cc365585eb6113266f5d7d0c0 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20220322-1.32.1.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20220322-1.32.1.jar.sha1 deleted file mode 100644 index 117ccad645b6b..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev20220322-1.32.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -865c67d7da65f928daf09718f05526b9f7687f29 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-1.41.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.41.1.jar.sha1 deleted file mode 100644 index 36362486494ba..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-1.41.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfccee0380e290c3f05398a4b6599518d7692573 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-gson-1.41.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-gson-1.41.1.jar.sha1 deleted file mode 100644 index 29edb9e4c305b..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-gson-1.41.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f78c28e784242ca47b4b180c6d794f1fab112160 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.41.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.41.1.jar.sha1 deleted file mode 100644 index 8d642261a58bc..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.41.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13062434e2f8bcaa95f3692ae78fcb3cd6064801 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.33.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.33.0.jar.sha1 deleted file mode 100644 index 93644ff9d02d8..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.33.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de6ab408602db40d7fe6af474fd42e466e0f1fef \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-context-1.27.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-context-1.27.2.jar.sha1 deleted file mode 100644 index eb75368f1febb..0000000000000 --- 
a/plugins/discovery-gce/licenses/grpc-context-1.27.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1789190601b7a5361e4fa52b6bc95ec2cd71e854 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/guava-31.0.1-jre.jar.sha1 b/plugins/discovery-gce/licenses/guava-31.0.1-jre.jar.sha1 deleted file mode 100644 index 1906a4f95370c..0000000000000 --- a/plugins/discovery-gce/licenses/guava-31.0.1-jre.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -119ea2b2bc205b138974d351777b20f02b92704b \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/jackson-core-2.13.2.jar.sha1 b/plugins/discovery-gce/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/plugins/discovery-gce/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/jsr305-3.0.2.jar.sha1 b/plugins/discovery-gce/licenses/jsr305-3.0.2.jar.sha1 deleted file mode 100644 index c5c92d87b9d6c..0000000000000 --- a/plugins/discovery-gce/licenses/jsr305-3.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25ea2e8b0c338a877313bd4672d3fe056ea78f0d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/plugins/discovery-gce/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/opencensus-api-0.30.0.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-api-0.30.0.jar.sha1 deleted file mode 100644 index c81781c4eb9b7..0000000000000 --- a/plugins/discovery-gce/licenses/opencensus-api-0.30.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c445990db10f3b6cc71bcfb7b0ba3201e92f902c \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.30.0.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.30.0.jar.sha1 deleted file mode 100644 index 666efa10923be..0000000000000 --- a/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.30.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2720ebf3ec454c8cd5d5bb1e035bb0390830d620 \ No newline at end of file diff --git a/plugins/repository-hdfs/hadoop-client-api/licenses/hadoop-client-api-3.3.3.jar.sha1 b/plugins/repository-hdfs/hadoop-client-api/licenses/hadoop-client-api-3.3.3.jar.sha1 deleted file mode 100644 index 8df133d0bd106..0000000000000 --- a/plugins/repository-hdfs/hadoop-client-api/licenses/hadoop-client-api-3.3.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0593aed2d4df9bcee507550913d29d589ebd84a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 deleted file mode 100644 index d38d00127e8cd..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bf96b7aa8b611c177d329452af1dc933e14501c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.14.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git 
a/plugins/repository-hdfs/licenses/commons-io-2.8.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.8.0.jar.sha1 deleted file mode 100644 index 8f08bafec984b..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -92999e26e6534606b5678014e66948286298a35c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.11.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.11.jar.sha1 deleted file mode 100644 index 56536386bde29..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68e9a6adf7cf8eb7e9d31bbc554c7c75eeaac568 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 deleted file mode 100644 index f980eebc7a46c..0000000000000 --- a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52619ecfb0225d7ae67b15264521064824ac57ca \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 deleted file mode 100644 index 463b7415e4c4b..0000000000000 --- a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4d199760c11d47f90e12fe3882e2b24c77e4eb5 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/javax.servlet-api-3.1.0.jar.sha1 b/plugins/repository-hdfs/licenses/javax.servlet-api-3.1.0.jar.sha1 deleted file mode 100644 index c66044ac5493e..0000000000000 --- a/plugins/repository-hdfs/licenses/javax.servlet-api-3.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cd63d075497751784b2fa84be59432f4905bf7c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/plugins/repository-hdfs/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 deleted file mode 100644 index f47bae03e8ea2..0000000000000 --- a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0ea6ef49f1349bb30e8c6e8a7052d0f3ee7a719 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.4.0.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.4.0.jar.sha1 deleted file mode 100644 index df9130ffeb5d5..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b32aba0cbe737a4ca953f71688725972e3ee927c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- 
a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/server/licenses/HdrHistogram-2.1.9.jar.sha1 b/server/licenses/HdrHistogram-2.1.9.jar.sha1 deleted file mode 100644 index 2378df07b2c0c..0000000000000 --- a/server/licenses/HdrHistogram-2.1.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4631ce165eb400edecfa32e03d3f1be53dee754 \ No newline at end of file diff --git a/server/licenses/ecs-logging-core-1.2.0.jar.sha1 b/server/licenses/ecs-logging-core-1.2.0.jar.sha1 deleted file mode 100644 index fcb3f78058546..0000000000000 --- a/server/licenses/ecs-logging-core-1.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -37a47ec302777aca9f8ea10b7316f3a79d5d6202 \ No newline at end of file diff --git a/server/licenses/hppc-0.8.1.jar.sha1 b/server/licenses/hppc-0.8.1.jar.sha1 deleted file mode 100644 index 47684ed023210..0000000000000 --- a/server/licenses/hppc-0.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ffc7ba8f289428b9508ab484b8001dea944ae603 \ No newline at end of file diff --git a/server/licenses/jna-5.10.0.jar.sha1 b/server/licenses/jna-5.10.0.jar.sha1 deleted file mode 100644 index 3f0395cd5d178..0000000000000 --- a/server/licenses/jna-5.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cf4c87dd802db50721db66947aa237d7ad09418 \ No newline at end of file diff --git a/server/licenses/log4j-api-2.18.0.jar.sha1 b/server/licenses/log4j-api-2.18.0.jar.sha1 deleted file mode 100644 index 1f67f6c946dd6..0000000000000 --- a/server/licenses/log4j-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c72ad9b1d8d42e4ea7befd8248bf05877af4c63d \ No newline at end of file diff --git a/server/licenses/log4j-core-2.18.0.jar.sha1 b/server/licenses/log4j-core-2.18.0.jar.sha1 deleted file mode 100644 index fc69f08c2deb7..0000000000000 --- a/server/licenses/log4j-core-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -07c1882ede137548925eadb750615edab2f6e13c \ No newline at end of file diff --git a/server/licenses/log4j2-ecs-layout-1.2.0.jar.sha1 b/server/licenses/log4j2-ecs-layout-1.2.0.jar.sha1 deleted file mode 100644 index 79acd00b9326e..0000000000000 --- a/server/licenses/log4j2-ecs-layout-1.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba51fb2064cd5f6bc136e95c1463e3e68d823403 \ No newline at end of file diff --git a/server/licenses/t-digest-3.2.jar.sha1 b/server/licenses/t-digest-3.2.jar.sha1 deleted file mode 100644 index de6e848545f38..0000000000000 --- a/server/licenses/t-digest-3.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2ab94758b0276a8a26102adf8d528cf6d0567b9a \ No newline at end of file diff --git a/test/x-content/licenses/commons-compress-1.21.jar.sha1 b/test/x-content/licenses/commons-compress-1.21.jar.sha1 deleted file mode 100644 index 81ac609a1aa26..0000000000000 --- a/test/x-content/licenses/commons-compress-1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ec95b60d4e86b5c95a0e919cb172a0af98011ef \ No newline at end of file diff --git a/test/x-content/licenses/commons-lang3-3.9.jar.sha1 b/test/x-content/licenses/commons-lang3-3.9.jar.sha1 deleted file mode 100644 index 2adcfd377f87c..0000000000000 --- a/test/x-content/licenses/commons-lang3-3.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0122c7cee69b53ed4a7681c03d4ee4c0e2765da5 \ No newline at end of file diff --git a/test/x-content/licenses/jackson-annotations-2.13.2.jar.sha1 b/test/x-content/licenses/jackson-annotations-2.13.2.jar.sha1 deleted file mode 100644 index ecd3fb49d5b12..0000000000000 --- a/test/x-content/licenses/jackson-annotations-2.13.2.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -ec18851f1976d5b810ae1a5fcc32520d2d38f77a \ No newline at end of file diff --git a/test/x-content/licenses/jackson-core-2.13.2.jar.sha1 b/test/x-content/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/test/x-content/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/test/x-content/licenses/jackson-databind-2.13.2.jar.sha1 b/test/x-content/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/test/x-content/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/test/x-content/licenses/json-schema-validator-1.0.48.jar.sha1 b/test/x-content/licenses/json-schema-validator-1.0.48.jar.sha1 deleted file mode 100644 index d9a70e8234db9..0000000000000 --- a/test/x-content/licenses/json-schema-validator-1.0.48.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d28a6fb127101a59a452e338dbda973d09cf3434 \ No newline at end of file diff --git a/x-pack/plugin/analytics/licenses/commons-math3-3.6.1.jar.sha1 b/x-pack/plugin/analytics/licenses/commons-math3-3.6.1.jar.sha1 deleted file mode 100644 index 72975be4c8851..0000000000000 --- a/x-pack/plugin/analytics/licenses/commons-math3-3.6.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4ba98f1d4b3c80ec46392f25e094a6a2e58fcbf \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/commons-codec-1.14.jar.sha1 b/x-pack/plugin/core/licenses/commons-codec-1.14.jar.sha1 deleted file mode 100644 index 9fe75b9a90da7..0000000000000 --- a/x-pack/plugin/core/licenses/commons-codec-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cb1181b2141a7e752f5bdc998b7ef1849f726cf \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/commons-logging-1.1.3.jar.sha1 b/x-pack/plugin/core/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/x-pack/plugin/core/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpasyncclient-4.1.5.jar.sha1 b/x-pack/plugin/core/licenses/httpasyncclient-4.1.5.jar.sha1 deleted file mode 100644 index 366a9e31069a6..0000000000000 --- a/x-pack/plugin/core/licenses/httpasyncclient-4.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpclient-4.5.13.jar.sha1 b/x-pack/plugin/core/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/x-pack/plugin/core/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpcore-4.4.13.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-4.4.13.jar.sha1 deleted file mode 100644 index 0cb64863b9760..0000000000000 --- a/x-pack/plugin/core/licenses/httpcore-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -853b96d3afbb7bf8cc303fe27ee96836a10c1834 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpcore-nio-4.4.13.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-nio-4.4.13.jar.sha1 deleted file mode 100644 index 7629b7d5584c8..0000000000000 --- a/x-pack/plugin/core/licenses/httpcore-nio-4.4.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-3f897ace4d7f10f0ea6a58f524a3b105dd483653 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/log4j-1.2-api-2.18.0.jar.sha1 b/x-pack/plugin/core/licenses/log4j-1.2-api-2.18.0.jar.sha1 deleted file mode 100644 index 882888ed2de0a..0000000000000 --- a/x-pack/plugin/core/licenses/log4j-1.2-api-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09b1039c025e0d9a792daf1af0eac564e7181210 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/unboundid-ldapsdk-6.0.3.jar.sha1 b/x-pack/plugin/core/licenses/unboundid-ldapsdk-6.0.3.jar.sha1 deleted file mode 100644 index 4ea966b7afa79..0000000000000 --- a/x-pack/plugin/core/licenses/unboundid-ldapsdk-6.0.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6e50ca49ba6270ea61d9bf7575b7c0dc16e2e8f \ No newline at end of file diff --git a/x-pack/plugin/eql/qa/common/licenses/jtoml-2.0.0.jar.sha1 b/x-pack/plugin/eql/qa/common/licenses/jtoml-2.0.0.jar.sha1 deleted file mode 100644 index ccf73fed50e38..0000000000000 --- a/x-pack/plugin/eql/qa/common/licenses/jtoml-2.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2085505b2b173e14b2a7702edd703ce185fd6206 diff --git a/x-pack/plugin/identity-provider/licenses/cryptacular-1.2.4.jar.sha1 b/x-pack/plugin/identity-provider/licenses/cryptacular-1.2.4.jar.sha1 deleted file mode 100644 index 19095bb5dff64..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/cryptacular-1.2.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4994c015d87886212683245d13e87f6fb903a760 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/failureaccess-1.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/failureaccess-1.0.1.jar.sha1 deleted file mode 100644 index 4798b37e20691..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/failureaccess-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/guava-28.2-jre.jar.sha1 b/x-pack/plugin/identity-provider/licenses/guava-28.2-jre.jar.sha1 deleted file mode 100644 index 23fd21a17670e..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/guava-28.2-jre.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8ec9ed76528425762174f0011ce8f74ad845b756 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/httpclient-cache-4.5.13.jar.sha1 b/x-pack/plugin/identity-provider/licenses/httpclient-cache-4.5.13.jar.sha1 deleted file mode 100644 index 08647c1ca2010..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/httpclient-cache-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4abee263cbc9edc12393212ca3a7c89af0755b1f \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/java-support-8.0.0.jar.sha1 b/x-pack/plugin/identity-provider/licenses/java-support-8.0.0.jar.sha1 deleted file mode 100644 index d4e6f8a43261c..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/java-support-8.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -298f946e93922d789b6231599a446cea9dbbe80e \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/jsr305-3.0.2.jar.sha1 b/x-pack/plugin/identity-provider/licenses/jsr305-3.0.2.jar.sha1 deleted file mode 100644 index c5c92d87b9d6c..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/jsr305-3.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25ea2e8b0c338a877313bd4672d3fe056ea78f0d \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 
b/x-pack/plugin/identity-provider/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 deleted file mode 100644 index f47bae03e8ea2..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0ea6ef49f1349bb30e8c6e8a7052d0f3ee7a719 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/metrics-core-4.1.4.jar.sha1 b/x-pack/plugin/identity-provider/licenses/metrics-core-4.1.4.jar.sha1 deleted file mode 100644 index b578444834f04..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/metrics-core-4.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ca98c8e95b22ac51a7aab332ff0d6ff004ce159e \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-core-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-core-4.0.1.jar.sha1 deleted file mode 100644 index ffd31fc065f45..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-core-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec3d1734137d6ccabba7d6d5e149f571beeaa673 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-messaging-api-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-messaging-api-4.0.1.jar.sha1 deleted file mode 100644 index d3b7de3e0169b..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-messaging-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb9c9971f6bd2a6681a2a692a1f29a35874de389 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 deleted file mode 100644 index 1b7a6502cfd5d..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -938ef9b81f5aa5762d04c82e2e6b40cdb1ab4685 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-profile-api-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-profile-api-4.0.1.jar.sha1 deleted file mode 100644 index 541f1f16fa945..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-profile-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bece5f6d30d4051e6eeaf2b88dd1e5a13f6b28b7 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-profile-impl-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-profile-impl-4.0.1.jar.sha1 deleted file mode 100644 index 85b6386b4892b..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-profile-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74cb2a74d1e392f339d5772fb6fa436eb77459a0 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-saml-api-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-saml-api-4.0.1.jar.sha1 deleted file mode 100644 index 06a229f8ec672..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-saml-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2205aba935f4da468382a3dc5f32c3821ec1564c \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-saml-impl-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-saml-impl-4.0.1.jar.sha1 deleted file mode 100644 index e14c524517f44..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-saml-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -995986fd848ede1443469f3aff1f82b740224262 \ No newline at end of file 
diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-security-api-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-security-api-4.0.1.jar.sha1 deleted file mode 100644 index 8d9f052e4f4d5..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-security-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3d33ca18cde2a7c7e3643aeca9f03974be9577d \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-security-impl-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-security-impl-4.0.1.jar.sha1 deleted file mode 100644 index 435c03939695d..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-security-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64568e9aa8bd7bcd76983e462f9eb2c3dcacbdce \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-soap-api-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-soap-api-4.0.1.jar.sha1 deleted file mode 100644 index 9937a971f62e8..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-soap-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d8e11e31cb5164788a530478e1831969e94a38b6 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-soap-impl-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-soap-impl-4.0.1.jar.sha1 deleted file mode 100644 index 3cd93b16d04ba..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-soap-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -38bfaf5fc189774e94ead218bd1c754da295c226 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-storage-api-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-storage-api-4.0.1.jar.sha1 deleted file mode 100644 index 8ba6db7318d83..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-storage-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e46a7f965ac9f91976b0f298fd4d4e69e9056db \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-storage-impl-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-storage-impl-4.0.1.jar.sha1 deleted file mode 100644 index 64aa6efe38f9e..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-storage-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13f9fb617fb7d04ae976b4d0610171ea32291ee0 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 deleted file mode 100644 index 31e06d580ebe5..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -edb4365d3d183933cf0d0b31966ea352b8d20c60 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 b/x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 deleted file mode 100644 index 63ef96356c4d7..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -efa15ba85127ac3b20c75b8d4f04c7e92325a00a \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/slf4j-api-1.6.2.jar.sha1 b/x-pack/plugin/identity-provider/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/slf4j-api-1.6.2.jar.sha1 
+++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/x-pack/plugin/identity-provider/licenses/xmlsec-2.1.4.jar.sha1 b/x-pack/plugin/identity-provider/licenses/xmlsec-2.1.4.jar.sha1 deleted file mode 100644 index d85a4194f6a59..0000000000000 --- a/x-pack/plugin/identity-provider/licenses/xmlsec-2.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb43326f02e3e77526c24269c8b5d3cc3f7f6653 \ No newline at end of file diff --git a/x-pack/plugin/ml/licenses/commons-math3-3.6.1.jar.sha1 b/x-pack/plugin/ml/licenses/commons-math3-3.6.1.jar.sha1 deleted file mode 100644 index ed9a549757f50..0000000000000 --- a/x-pack/plugin/ml/licenses/commons-math3-3.6.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4ba98f1d4b3c80ec46392f25e094a6a2e58fcbf diff --git a/x-pack/plugin/ml/licenses/icu4j-68.2.jar.sha1 b/x-pack/plugin/ml/licenses/icu4j-68.2.jar.sha1 deleted file mode 100644 index fcb3d79075099..0000000000000 --- a/x-pack/plugin/ml/licenses/icu4j-68.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76893e6000401ace133a65262254be0ebe556d46 \ No newline at end of file diff --git a/x-pack/plugin/ml/licenses/ojalgo-51.2.0.jar.sha1 b/x-pack/plugin/ml/licenses/ojalgo-51.2.0.jar.sha1 deleted file mode 100644 index 1bac53d7e41a6..0000000000000 --- a/x-pack/plugin/ml/licenses/ojalgo-51.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c51c0a6d6ed1d3f541c53048bce6769137cc3e90 diff --git a/x-pack/plugin/ql/licenses/antlr4-runtime-4.9.2.jar.sha1 b/x-pack/plugin/ql/licenses/antlr4-runtime-4.9.2.jar.sha1 deleted file mode 100644 index 4c1ccb96d5302..0000000000000 --- a/x-pack/plugin/ql/licenses/antlr4-runtime-4.9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ece33ec76e002dfde574cf7b57451a91a99185c5 \ No newline at end of file diff --git a/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.64.jar.sha1 b/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.64.jar.sha1 deleted file mode 100644 index 568a9be16bee7..0000000000000 --- a/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.64.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3dac163e20110817d850d17e0444852a6d7d0bd7 \ No newline at end of file diff --git a/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.64.jar.sha1 b/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.64.jar.sha1 deleted file mode 100644 index 85b5d4e1e1805..0000000000000 --- a/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.64.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1467dac1b787b5ad2a18201c0c281df69882259e \ No newline at end of file diff --git a/x-pack/plugin/security/cli/licenses/commons-io-2.5.jar.sha1 b/x-pack/plugin/security/cli/licenses/commons-io-2.5.jar.sha1 deleted file mode 100644 index b7f1d93e89702..0000000000000 --- a/x-pack/plugin/security/cli/licenses/commons-io-2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2852e6e05fbb95076fc091f6d1780f1f8fe35e0f \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/accessors-smart-2.4.2.jar.sha1 b/x-pack/plugin/security/licenses/accessors-smart-2.4.2.jar.sha1 deleted file mode 100644 index 2d178b9cedb07..0000000000000 --- a/x-pack/plugin/security/licenses/accessors-smart-2.4.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f09981a3c80f0766998c68d83bfd060812d5bcd \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/asm-8.0.1.jar.sha1 b/x-pack/plugin/security/licenses/asm-8.0.1.jar.sha1 deleted file mode 100644 index b464db16af8a4..0000000000000 --- a/x-pack/plugin/security/licenses/asm-8.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f5199523fb95304b44563f5d56d9f5a07270669 \ No newline at end of file 
diff --git a/x-pack/plugin/security/licenses/cryptacular-1.2.4.jar.sha1 b/x-pack/plugin/security/licenses/cryptacular-1.2.4.jar.sha1 deleted file mode 100644 index 19095bb5dff64..0000000000000 --- a/x-pack/plugin/security/licenses/cryptacular-1.2.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4994c015d87886212683245d13e87f6fb903a760 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/failureaccess-1.0.1.jar.sha1 b/x-pack/plugin/security/licenses/failureaccess-1.0.1.jar.sha1 deleted file mode 100644 index 4798b37e20691..0000000000000 --- a/x-pack/plugin/security/licenses/failureaccess-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/guava-28.2-jre.jar.sha1 b/x-pack/plugin/security/licenses/guava-28.2-jre.jar.sha1 deleted file mode 100644 index 23fd21a17670e..0000000000000 --- a/x-pack/plugin/security/licenses/guava-28.2-jre.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8ec9ed76528425762174f0011ce8f74ad845b756 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/httpclient-cache-4.5.13.jar.sha1 b/x-pack/plugin/security/licenses/httpclient-cache-4.5.13.jar.sha1 deleted file mode 100644 index 08647c1ca2010..0000000000000 --- a/x-pack/plugin/security/licenses/httpclient-cache-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4abee263cbc9edc12393212ca3a7c89af0755b1f \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1 b/x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1 deleted file mode 100644 index 12d5021ee3752..0000000000000 --- a/x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -787e007e377223bba85a33599d3da416c135f99b \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/java-support-8.0.0.jar.sha1 b/x-pack/plugin/security/licenses/java-support-8.0.0.jar.sha1 deleted file mode 100644 index d4e6f8a43261c..0000000000000 --- a/x-pack/plugin/security/licenses/java-support-8.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -298f946e93922d789b6231599a446cea9dbbe80e \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1 b/x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1 deleted file mode 100644 index 9eaed5270992b..0000000000000 --- a/x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afba4942caaeaf46aab0b976afd57cc7c181467e \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/joda-time-2.10.10.jar.sha1 b/x-pack/plugin/security/licenses/joda-time-2.10.10.jar.sha1 deleted file mode 100644 index 50a9ea517e7e5..0000000000000 --- a/x-pack/plugin/security/licenses/joda-time-2.10.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29e8126e31f41e5c12b9fe3a7eb02e704c47d70b \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/json-smart-2.4.2.jar.sha1 b/x-pack/plugin/security/licenses/json-smart-2.4.2.jar.sha1 deleted file mode 100644 index 96d0eee8eb122..0000000000000 --- a/x-pack/plugin/security/licenses/json-smart-2.4.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7fcd0f985696c37cd3546f19c85c2ff367f2e85 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/jsr305-3.0.2.jar.sha1 b/x-pack/plugin/security/licenses/jsr305-3.0.2.jar.sha1 deleted file mode 100644 index c5c92d87b9d6c..0000000000000 --- a/x-pack/plugin/security/licenses/jsr305-3.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25ea2e8b0c338a877313bd4672d3fe056ea78f0d \ No 
newline at end of file diff --git a/x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1 b/x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1 deleted file mode 100644 index 9f21e84c8af3f..0000000000000 --- a/x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1db9a709239ae473a69b5424c7e78d0b7108229d \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 b/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 deleted file mode 100644 index f47bae03e8ea2..0000000000000 --- a/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0ea6ef49f1349bb30e8c6e8a7052d0f3ee7a719 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/metrics-core-4.1.4.jar.sha1 b/x-pack/plugin/security/licenses/metrics-core-4.1.4.jar.sha1 deleted file mode 100644 index b578444834f04..0000000000000 --- a/x-pack/plugin/security/licenses/metrics-core-4.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ca98c8e95b22ac51a7aab332ff0d6ff004ce159e \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-9.8.1.jar.sha1 b/x-pack/plugin/security/licenses/nimbus-jose-jwt-9.8.1.jar.sha1 deleted file mode 100644 index 2d8bf0caecc59..0000000000000 --- a/x-pack/plugin/security/licenses/nimbus-jose-jwt-9.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2af7f734313320e4b156522d22ce32b775633909 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/oauth2-oidc-sdk-9.3.1.jar.sha1 b/x-pack/plugin/security/licenses/oauth2-oidc-sdk-9.3.1.jar.sha1 deleted file mode 100644 index 8cf85da9737ac..0000000000000 --- a/x-pack/plugin/security/licenses/oauth2-oidc-sdk-9.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85891e8c391911ee1073f5e1737689cd804f1a9b \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-core-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-core-4.0.1.jar.sha1 deleted file mode 100644 index ffd31fc065f45..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-core-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec3d1734137d6ccabba7d6d5e149f571beeaa673 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-messaging-api-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-messaging-api-4.0.1.jar.sha1 deleted file mode 100644 index d3b7de3e0169b..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-messaging-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb9c9971f6bd2a6681a2a692a1f29a35874de389 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 deleted file mode 100644 index 1b7a6502cfd5d..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-messaging-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -938ef9b81f5aa5762d04c82e2e6b40cdb1ab4685 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-profile-api-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-profile-api-4.0.1.jar.sha1 deleted file mode 100644 index 541f1f16fa945..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-profile-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bece5f6d30d4051e6eeaf2b88dd1e5a13f6b28b7 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-profile-impl-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-profile-impl-4.0.1.jar.sha1 deleted file mode 100644 index 
85b6386b4892b..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-profile-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74cb2a74d1e392f339d5772fb6fa436eb77459a0 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-saml-api-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-saml-api-4.0.1.jar.sha1 deleted file mode 100644 index 06a229f8ec672..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-saml-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2205aba935f4da468382a3dc5f32c3821ec1564c \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-saml-impl-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-saml-impl-4.0.1.jar.sha1 deleted file mode 100644 index e14c524517f44..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-saml-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -995986fd848ede1443469f3aff1f82b740224262 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-security-api-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-security-api-4.0.1.jar.sha1 deleted file mode 100644 index 8d9f052e4f4d5..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-security-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3d33ca18cde2a7c7e3643aeca9f03974be9577d \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-security-impl-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-security-impl-4.0.1.jar.sha1 deleted file mode 100644 index 435c03939695d..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-security-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64568e9aa8bd7bcd76983e462f9eb2c3dcacbdce \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-soap-api-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-soap-api-4.0.1.jar.sha1 deleted file mode 100644 index 9937a971f62e8..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-soap-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d8e11e31cb5164788a530478e1831969e94a38b6 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-soap-impl-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-soap-impl-4.0.1.jar.sha1 deleted file mode 100644 index 3cd93b16d04ba..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-soap-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -38bfaf5fc189774e94ead218bd1c754da295c226 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-storage-api-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-storage-api-4.0.1.jar.sha1 deleted file mode 100644 index 8ba6db7318d83..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-storage-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e46a7f965ac9f91976b0f298fd4d4e69e9056db \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-storage-impl-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-storage-impl-4.0.1.jar.sha1 deleted file mode 100644 index 64aa6efe38f9e..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-storage-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13f9fb617fb7d04ae976b4d0610171ea32291ee0 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 deleted file mode 100644 index 31e06d580ebe5..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-xmlsec-api-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 
@@ -edb4365d3d183933cf0d0b31966ea352b8d20c60 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 deleted file mode 100644 index 63ef96356c4d7..0000000000000 --- a/x-pack/plugin/security/licenses/opensaml-xmlsec-impl-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -efa15ba85127ac3b20c75b8d4f04c7e92325a00a \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/slf4j-api-1.6.2.jar.sha1 b/x-pack/plugin/security/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/x-pack/plugin/security/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/xmlsec-2.1.4.jar.sha1 b/x-pack/plugin/security/licenses/xmlsec-2.1.4.jar.sha1 deleted file mode 100644 index d85a4194f6a59..0000000000000 --- a/x-pack/plugin/security/licenses/xmlsec-2.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb43326f02e3e77526c24269c8b5d3cc3f7f6653 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.21.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.21.0.jar.sha1 deleted file mode 100644 index 64ca426ffc24c..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.21.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cd5c76dd2a47e9e0e7ab39821c0f62fa46e8581 diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.21.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.21.0.jar.sha1 deleted file mode 100644 index f35fec6b26e61..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.21.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97f09299c294d5cb97eeaaa77f8a48f308e11210 diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.21.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.21.0.jar.sha1 deleted file mode 100644 index 41e54b7e59bbd..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.21.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -047579f05a0002f9c561cb10dcf2f744976c49ac diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.21.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.21.0.jar.sha1 deleted file mode 100644 index 519f939cf77ae..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.21.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed51f283abaef01b6a215e6fb69e86afea410499 diff --git a/x-pack/plugin/sql/sql-cli/licenses/jna-5.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jna-5.10.0.jar.sha1 deleted file mode 100644 index 3f0395cd5d178..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jna-5.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cf4c87dd802db50721db66947aa237d7ad09418 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/jackson-core-2.13.2.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/jackson-core-2.13.2.jar.sha1 deleted file mode 100644 index eb8a8bc45f041..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/jackson-core-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6a0e0620d51833feffc67bccb51937b2345763 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 deleted file mode 100644 index 3a4f0e1b17565..0000000000000 --- 
a/x-pack/plugin/sql/sql-proto/licenses/jackson-dataformat-cbor-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4fc77e1ec6922fc48bf1181e4b38f600dac222ff \ No newline at end of file diff --git a/x-pack/plugin/text-structure/licenses/icu4j-68.2.jar.sha1 b/x-pack/plugin/text-structure/licenses/icu4j-68.2.jar.sha1 deleted file mode 100644 index fcb3d79075099..0000000000000 --- a/x-pack/plugin/text-structure/licenses/icu4j-68.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76893e6000401ace133a65262254be0ebe556d46 \ No newline at end of file diff --git a/x-pack/plugin/text-structure/licenses/super-csv-2.4.0.jar.sha1 b/x-pack/plugin/text-structure/licenses/super-csv-2.4.0.jar.sha1 deleted file mode 100644 index a0b402133090d..0000000000000 --- a/x-pack/plugin/text-structure/licenses/super-csv-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -017f8708c929029dde48bc298deaf3c7ae2452d3 \ No newline at end of file diff --git a/x-pack/plugin/vector-tile/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 b/x-pack/plugin/vector-tile/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 deleted file mode 100644 index f47bae03e8ea2..0000000000000 --- a/x-pack/plugin/vector-tile/licenses/log4j-slf4j-impl-2.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0ea6ef49f1349bb30e8c6e8a7052d0f3ee7a719 \ No newline at end of file diff --git a/x-pack/plugin/vector-tile/licenses/mapbox-vector-tile-3.1.0.jar.sha1 b/x-pack/plugin/vector-tile/licenses/mapbox-vector-tile-3.1.0.jar.sha1 deleted file mode 100644 index c98d2861a1bb8..0000000000000 --- a/x-pack/plugin/vector-tile/licenses/mapbox-vector-tile-3.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06c4432c7885a3938571a57e73cc1444d7a39f12 \ No newline at end of file diff --git a/x-pack/plugin/vector-tile/licenses/protobuf-java-3.16.1.jar.sha1 b/x-pack/plugin/vector-tile/licenses/protobuf-java-3.16.1.jar.sha1 deleted file mode 100644 index c824fb121b2cd..0000000000000 --- a/x-pack/plugin/vector-tile/licenses/protobuf-java-3.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -23b80908eaf488134ceef1904e83e6f6821908c0 \ No newline at end of file diff --git a/x-pack/plugin/vector-tile/licenses/slf4j-api-1.6.2.jar.sha1 b/x-pack/plugin/vector-tile/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/x-pack/plugin/vector-tile/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/failureaccess-1.0.1.jar.sha1 b/x-pack/plugin/watcher/licenses/failureaccess-1.0.1.jar.sha1 deleted file mode 100644 index 4798b37e20691..0000000000000 --- a/x-pack/plugin/watcher/licenses/failureaccess-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/guava-27.1-jre.jar.sha1 b/x-pack/plugin/watcher/licenses/guava-27.1-jre.jar.sha1 deleted file mode 100644 index 07bf9c19e42b2..0000000000000 --- a/x-pack/plugin/watcher/licenses/guava-27.1-jre.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e47b59c893079b87743cdcfb6f17ca95c08c592c \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/jakarta.activation-1.2.1.jar.sha1 b/x-pack/plugin/watcher/licenses/jakarta.activation-1.2.1.jar.sha1 deleted file mode 100644 index 20b21a541f29f..0000000000000 --- a/x-pack/plugin/watcher/licenses/jakarta.activation-1.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8013606426a73d8ba6b568370877251e91a38b89 \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/jakarta.mail-1.6.4.jar.sha1 
b/x-pack/plugin/watcher/licenses/jakarta.mail-1.6.4.jar.sha1 deleted file mode 100644 index 61664ee45cc15..0000000000000 --- a/x-pack/plugin/watcher/licenses/jakarta.mail-1.6.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5015f335c2b974b1a7d08718edc326f0dc613c8a \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-20211018.2.jar.sha1 b/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-20211018.2.jar.sha1 deleted file mode 100644 index 9402365d3132c..0000000000000 --- a/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-20211018.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3226c13cf72633122e94810a53e60529dae2b80 \ No newline at end of file From b96c39e7ad62962ae025541bdbb4b6b790c729d6 Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Thu, 4 Aug 2022 10:02:28 +0200 Subject: [PATCH 099/265] [DOCS] Move completion type asciidoc (#89086) * [DOCS] Move completion type asciidoc * Fix failing code snippet test --- docs/reference/mapping/types.asciidoc | 2 + .../mapping/types/completion.asciidoc | 61 +++++++++++++++++++ .../suggesters/completion-suggest.asciidoc | 54 ++++------------ 3 files changed, 74 insertions(+), 43 deletions(-) create mode 100644 docs/reference/mapping/types/completion.asciidoc diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index c3116d56175ba..7108d536f8715 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -137,6 +137,8 @@ include::types/binary.asciidoc[] include::types/boolean.asciidoc[] +include::types/completion.asciidoc[] + include::types/date.asciidoc[] include::types/date_nanos.asciidoc[] diff --git a/docs/reference/mapping/types/completion.asciidoc b/docs/reference/mapping/types/completion.asciidoc new file mode 100644 index 0000000000000..d8b1ce98b5292 --- /dev/null +++ b/docs/reference/mapping/types/completion.asciidoc @@ -0,0 +1,61 @@ +[[completion]] +=== Completion field type +++++ +Completion +++++ +// tag::completion-mapping[] +To use the <>, map the field from +which you want to generate suggestions as type `completion`. This indexes the +field values for fast completions. + +[source,console] +-------------------------------------------------- +PUT music +{ + "mappings": { + "properties": { + "suggest": { + "type": "completion" + } + } + } +} +-------------------------------------------------- + +==== Parameters for `completion` fields + +The following parameters are accepted by `completion` fields: + +[horizontal] +<>:: + + The index analyzer to use, defaults to `simple`. + +<>:: + + The search analyzer to use, defaults to value of `analyzer`. + +`preserve_separators`:: + + Preserves the separators, defaults to `true`. + If disabled, you could find a field starting with `Foo Fighters`, if you + suggest for `foof`. + +`preserve_position_increments`:: + + Enables position increments, defaults to `true`. + If disabled and using stopwords analyzer, you could get a + field starting with `The Beatles`, if you suggest for `b`. *Note*: You + could also achieve this by indexing two inputs, `Beatles` and + `The Beatles`, no need to change a simple analyzer, if you are able to + enrich your data. + +`max_input_length`:: + + Limits the length of a single input, defaults to `50` UTF-16 code points. + This limit is only used at index time to reduce the total number of + characters per input string in order to prevent massive inputs from + bloating the underlying datastructure. 
Most use cases won't be influenced + by the default value since prefix completions seldom grow beyond prefixes longer + than a handful of characters. +// end::completion-mapping[] \ No newline at end of file diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 0ba27e7d90742..2237d209f1381 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -15,10 +15,18 @@ but are costly to build and are stored in-memory. [[completion-suggester-mapping]] ===== Mapping +include::../../mapping/types/completion.asciidoc[tag=completion-mapping] -To use this feature, specify a special mapping for this field, -which indexes the field values for fast completions. +[[indexing]] +===== Indexing + +You index suggestions like any other field. A suggestion is made of an +`input` and an optional `weight` attribute. An `input` is the expected +text to be matched by a suggestion query and the `weight` determines how +the suggestions will be scored. Indexing a suggestion is as follows: + +//// [source,console] -------------------------------------------------- PUT music @@ -27,53 +35,13 @@ PUT music "properties": { "suggest": { "type": "completion" - }, - "title": { - "type": "keyword" } } } } -------------------------------------------------- // TESTSETUP - -Mapping supports the following parameters: - -[horizontal] -`analyzer`:: - The index analyzer to use, defaults to `simple`. - -`search_analyzer`:: - The search analyzer to use, defaults to value of `analyzer`. - -`preserve_separators`:: - Preserves the separators, defaults to `true`. - If disabled, you could find a field starting with `Foo Fighters`, if you - suggest for `foof`. - -`preserve_position_increments`:: - Enables position increments, defaults to `true`. - If disabled and using stopwords analyzer, you could get a - field starting with `The Beatles`, if you suggest for `b`. *Note*: You - could also achieve this by indexing two inputs, `Beatles` and - `The Beatles`, no need to change a simple analyzer, if you are able to - enrich your data. - -`max_input_length`:: - Limits the length of a single input, defaults to `50` UTF-16 code points. - This limit is only used at index time to reduce the total number of - characters per input string in order to prevent massive inputs from - bloating the underlying datastructure. Most use cases won't be influenced - by the default value since prefix completions seldom grow beyond prefixes longer - than a handful of characters. - -[[indexing]] -===== Indexing - -You index suggestions like any other field. A suggestion is made of an -`input` and an optional `weight` attribute. An `input` is the expected -text to be matched by a suggestion query and the `weight` determines how -the suggestions will be scored. 
Indexing a suggestion is as follows: +//// [source,console] -------------------------------------------------- From bd797e5681988638cf02709585232604eb27ee47 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 4 Aug 2022 11:10:29 +0200 Subject: [PATCH 100/265] Update dependency verification for apm (#89104) --- gradle/verification-metadata.xml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 11942b0e49d5a..d51d3019be1ae 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -61,6 +61,11 @@ + + + + + @@ -1251,6 +1256,21 @@ + + + + + + + + + + + + + + + From 6ff0099ba3fdb3f21d7a49390b7211c70f343811 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Thu, 4 Aug 2022 13:00:32 +0200 Subject: [PATCH 101/265] [Transform][CI] add debug logging to find the cause of #88991 (#89080) add debug logging to find the cause of #88991 --- .../test/transform/transforms_start_stop.yml | 18 ++++++++++++++++++ .../action/TransportStopTransformAction.java | 6 ++++++ 2 files changed, 24 insertions(+) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_start_stop.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_start_stop.yml index de1af5936d2e1..e2cd1a44998a3 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_start_stop.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/transform/transforms_start_stop.yml @@ -61,6 +61,15 @@ setup: } } } + - do: + cluster.put_settings: + body: > + { + "persistent": { + "logger.org.elasticsearch.xpack.transform.action": "DEBUG" + } + } + --- teardown: @@ -91,6 +100,15 @@ teardown: - do: transform.delete_transform: transform_id: "airline-transform-start-stop-continuous" + - do: + cluster.put_settings: + body: > + { + "persistent": { + "logger.org.elasticsearch.xpack.transform.action": "INFO" + } + } + --- "Test start transform": - do: diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java index d54f6b7b7a0ef..6d14c4e1a86e5 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java @@ -295,6 +295,12 @@ private ActionListener waitForStopListener(Request request, ActionList // If there were failures attempting to stop the tasks, we don't know if they will actually stop. 
// It is better to respond to the user now than allow for the persistent task waiting to timeout if (response.getTaskFailures().isEmpty() == false || response.getNodeFailures().isEmpty() == false) { + logger.debug( + "[{}] Failure when waiting for transform to stop, task failures: [{}], node failures: [{}]", + request.getId(), + response.getTaskFailures(), + response.getNodeFailures() + ); RestStatus status = firstNotOKStatus(response.getTaskFailures(), response.getNodeFailures()); listener.onFailure(buildException(response.getTaskFailures(), response.getNodeFailures(), status)); return; From 418883aeb908dda081f4443d12abb017408b9a68 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 4 Aug 2022 15:09:57 +0300 Subject: [PATCH 102/265] maybeScheduleNow with delay 0 instead of 1 (#89110) Replace the 1 millisecond delay to 0 when we want to schedule a monitoring task now. --- .../java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java | 2 +- .../java/org/elasticsearch/health/node/LocalHealthMonitor.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index da3b4d0556b67..6776ab9d629a2 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -121,7 +121,7 @@ public class GeoIpDownloader extends AllocatedPersistentTask { public void setPollInterval(TimeValue pollInterval) { this.pollInterval = pollInterval; if (scheduled != null && scheduled.cancel()) { - scheduleNextRun(new TimeValue(1)); + scheduleNextRun(TimeValue.ZERO); } } diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index 8dc1afa6a99cc..8bdf4e6859d7c 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -99,7 +99,7 @@ private void maybeScheduleNextRun(TimeValue time) { // Helper method that starts the monitoring without a delay. 
private void maybeScheduleNow() { - maybeScheduleNextRun(TimeValue.timeValueMillis(1)); + maybeScheduleNextRun(TimeValue.ZERO); } @Override From 7f2331cdfb08fcd69f1f896b0d41c1b395ca9919 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 4 Aug 2022 13:18:41 +0100 Subject: [PATCH 103/265] Merge trivial changes from desired balance feature branch (#89109) --- .../cluster/allocation/ClusterRerouteIT.java | 4 +- .../UpdateShardAllocationSettingsIT.java | 3 +- .../gateway/ReplicaShardAllocatorIT.java | 8 +++- .../indices/create/AutoCreateAction.java | 2 +- .../metadata/MetadataCreateIndexService.java | 4 +- .../MetadataUpdateSettingsService.java | 46 ++++++++++--------- .../routing/BatchedRerouteService.java | 11 +++-- .../cluster/routing/RoutingNodes.java | 2 +- .../allocator/BalancedShardsAllocator.java | 6 ++- ...tadataMigrateToDataStreamServiceTests.java | 2 +- .../decider/AllocationDecidersTests.java | 29 ++++++------ .../org/elasticsearch/test/ESTestCase.java | 3 +- .../xpack/shutdown/NodeShutdownShardsIT.java | 3 +- 13 files changed, 69 insertions(+), 54 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 01c0428cf2802..7f2c44faf912d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -218,7 +218,7 @@ public void testDelayWithALargeAmountOfShards() throws Exception { .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 1) .build(); logger.info("--> starting 4 nodes"); - String node_1 = internalCluster().startNode(commonSettings); + String node1 = internalCluster().startNode(commonSettings); internalCluster().startNode(commonSettings); internalCluster().startNode(commonSettings); internalCluster().startNode(commonSettings); @@ -246,7 +246,7 @@ public void testDelayWithALargeAmountOfShards() throws Exception { ensureGreen(TimeValue.timeValueMinutes(1)); logger.info("--> stopping node1"); - internalCluster().stopNode(node_1); + internalCluster().stopNode(node1); // This might run slowly on older hardware ensureGreen(TimeValue.timeValueMinutes(2)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java index e9e6b3cbba70e..04da41318321e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java @@ -29,7 +29,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase { /** * Tests that updating the {@link EnableAllocationDecider} related settings works as expected. 
*/ - public void testEnableRebalance() throws InterruptedException { + public void testEnableRebalance() { final String firstNode = internalCluster().startNode(); client().admin() .cluster() @@ -142,6 +142,7 @@ public void testUpdateSameHostSetting() { .prepareUpdateSettings() .setPersistentSettings(Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), false)) .get(); + clusterState = client().admin().cluster().prepareState().get().getState(); assertTrue( "all shards should be assigned", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 92d28927db24d..96b985e0286f9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -41,6 +41,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; @@ -360,7 +361,12 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable").build()) + .setPersistentSettings( + Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) + .putNull(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()) + .build() + ) ); ensureGreen(indexName); assertThat(internalCluster().nodesInclude(indexName), allOf(hasItem(nodeWithHigherMatching), not(hasItem(nodeWithLowerMatching)))); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 817b56a4b2068..c09431d114d94 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -107,7 +107,7 @@ public TransportAction( this.createIndexService = createIndexService; this.metadataCreateDataStreamService = metadataCreateDataStreamService; this.autoCreateIndex = autoCreateIndex; - executor = (currentState, taskContexts) -> { + this.executor = (currentState, taskContexts) -> { ClusterState state = currentState; final Map successfulRequests = Maps.newMapWithExpectedSize(taskContexts.size()); for (final var taskContext : taskContexts) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 358134b0ca202..f64e2e002f8a5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -266,7 +266,7 @@ public void createIndex(final CreateIndexClusterStateUpdateRequest request, fina shardsAcknowledged -> { if (shardsAcknowledged == false) { logger.debug( - "[{}] 
index created, but the operation timed out while waiting for " + "enough shards to be started.", + "[{}] index created, but the operation timed out while waiting for enough shards to be started.", request.index() ); } else { @@ -371,7 +371,7 @@ public ClusterState applyCreateIndexRequest( final String v2Template = MetadataIndexTemplateService.findV2Template( currentState.metadata(), name, - isHiddenFromRequest == null ? false : isHiddenFromRequest + isHiddenFromRequest != null && isHiddenFromRequest ); if (v2Template != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 1ba382c725670..5ac4190624b8a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -76,7 +76,7 @@ public MetadataUpdateSettingsService( try { final var task = taskContext.getTask(); state = task.execute(state); - taskContext.success(task); + taskContext.success(task.getAckListener()); } catch (Exception e) { taskContext.onFailure(e); } @@ -89,7 +89,7 @@ public MetadataUpdateSettingsService( }; } - private final class UpdateSettingsTask implements ClusterStateAckListener, ClusterStateTaskListener { + private final class UpdateSettingsTask implements ClusterStateTaskListener { private final UpdateSettingsClusterStateUpdateRequest request; private final ActionListener listener; @@ -98,29 +98,33 @@ private UpdateSettingsTask(UpdateSettingsClusterStateUpdateRequest request, Acti this.listener = listener; } - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - return true; - } + private ClusterStateAckListener getAckListener() { + return new ClusterStateAckListener() { + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return true; + } - @Override - public void onAllNodesAcked() { - listener.onResponse(AcknowledgedResponse.of(true)); - } + @Override + public void onAllNodesAcked() { + listener.onResponse(AcknowledgedResponse.of(true)); + } - @Override - public void onAckFailure(Exception e) { - listener.onFailure(e); - } + @Override + public void onAckFailure(Exception e) { + listener.onFailure(e); + } - @Override - public void onAckTimeout() { - listener.onResponse(AcknowledgedResponse.of(false)); - } + @Override + public void onAckTimeout() { + listener.onResponse(AcknowledgedResponse.of(false)); + } - @Override - public TimeValue ackTimeout() { - return request.ackTimeout(); + @Override + public TimeValue ackTimeout() { + return request.ackTimeout(); + } + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java index beb959ad4cedb..8cc59648d1dcb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.function.BiFunction; import static org.elasticsearch.core.Strings.format; @@ -39,17 +38,21 @@ public class BatchedRerouteService implements RerouteService { private static final String CLUSTER_UPDATE_TASK_SOURCE = "cluster_reroute"; private final ClusterService clusterService; - private final BiFunction reroute; + private final RerouteAction 
reroute; private final Object mutex = new Object(); @Nullable // null if no reroute is currently pending private List> pendingRerouteListeners; private Priority pendingTaskPriority = Priority.LANGUID; + public interface RerouteAction { + ClusterState reroute(ClusterState state, String reason); + } + /** * @param reroute Function that computes the updated cluster state after it has been rerouted. */ - public BatchedRerouteService(ClusterService clusterService, BiFunction reroute) { + public BatchedRerouteService(ClusterService clusterService, RerouteAction reroute) { this.clusterService = clusterService; this.reroute = reroute; } @@ -114,7 +117,7 @@ public ClusterState execute(ClusterState currentState) { } if (currentListenersArePending) { logger.trace("performing batched reroute [{}]", reason); - return reroute.apply(currentState, reason); + return reroute.reroute(currentState, reason); } else { logger.trace("batched reroute [{}] was promoted", reason); return currentState; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 88772ea8b6b8d..e14cd918a2bff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -200,7 +200,7 @@ private void updateRecoveryCounts(final ShardRouting routing, final boolean incr if (routing.recoverySource().getType() == RecoverySource.Type.PEER) { // add/remove corresponding outgoing recovery on node with primary shard if (primary == null) { - throw new IllegalStateException("shard is peer recovering but primary is unassigned"); + throw new IllegalStateException("shard [" + routing + "] is peer recovering but primary is unassigned"); } Recoveries.getOrAdd(recoveriesPerNode, primary.currentNodeId()).addOutgoing(howMany); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 66c1c7f20016f..09180a462446f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -119,6 +119,8 @@ private void setThreshold(float threshold) { @Override public void allocate(RoutingAllocation allocation) { + assert allocation.ignoreDisable() == false; + if (allocation.routingNodes().size() == 0) { failAllocationOfNewPrimaries(allocation); return; @@ -536,7 +538,7 @@ private void balanceByWeights() { } if (logger.isTraceEnabled()) { logger.trace( - "Stop balancing index [{}] min_node [{}] weight: [{}]" + " max_node [{}] weight: [{}] delta: [{}]", + "Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]", index, maxNode.getNodeId(), weights[highIdx], @@ -549,7 +551,7 @@ private void balanceByWeights() { } if (logger.isTraceEnabled()) { logger.trace( - "Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", + "Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java index 
348d3db93e28e..2dc01e85a5844 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java @@ -376,7 +376,7 @@ public void testCreateDataStreamHidesBackingIndicesAndRemovesAlias() throws Exce } } - public void testCreateDataStreamWithoutSuppliedWriteIndex() throws Exception { + public void testCreateDataStreamWithoutSuppliedWriteIndex() { String dataStreamName = "foo"; AliasMetadata alias = AliasMetadata.builder(dataStreamName).build(); IndexMetadata foo1 = IndexMetadata.builder("foo1") diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java index c8a06783ebba6..0772e0fb905e5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecidersTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; @@ -28,6 +29,8 @@ import java.util.Collection; import java.util.List; +import static org.hamcrest.Matchers.equalTo; + public class AllocationDecidersTests extends ESTestCase { public void testDebugMode() { @@ -97,13 +100,7 @@ public Decision canRebalance(RoutingAllocation allocation) { final RoutingAllocation allocation = new RoutingAllocation(deciders, clusterState, null, null, 0L); allocation.setDebugMode(mode); - final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "_message"); - final ShardRouting shardRouting = ShardRouting.newUnassigned( - new ShardId(testIdx.getIndex(), 0), - true, - RecoverySource.ExistingStoreRecoverySource.INSTANCE, - unassignedInfo - ); + final ShardRouting shardRouting = createShardRouting(testIdx.getIndex()); RoutingNode routingNode = RoutingNodesHelper.routingNode("testNode", null); verify(deciders.canAllocate(shardRouting, routingNode, allocation), matcher); @@ -122,7 +119,7 @@ public Decision canRebalance(RoutingAllocation allocation) { } private void verify(Decision decision, Matcher> matcher) { - assertThat(decision.type(), Matchers.equalTo(Decision.Type.YES)); + assertThat(decision.type(), equalTo(Decision.Type.YES)); assertThat(decision, Matchers.instanceOf(Decision.Multi.class)); Decision.Multi multi = (Decision.Multi) decision; assertThat(multi.getDecisions(), matcher); @@ -238,12 +235,7 @@ private Decision decision(RoutingAllocation allocation) { .build(); // no debug should just short-circuit to no, no matter what kind of no type return the first decider returns - final ShardRouting shardRouting = ShardRouting.newUnassigned( - new ShardId(testIdx.getIndex(), 0), - true, - RecoverySource.ExistingStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "_message") - ); + final ShardRouting shardRouting = createShardRouting(testIdx.getIndex()); final RoutingNode routingNode = RoutingNodesHelper.routingNode("testNode", null); final IndexMetadata indexMetadata = IndexMetadata.builder("idx") 
.settings(settings(Version.CURRENT)) @@ -276,4 +268,13 @@ private Decision decision(RoutingAllocation allocation) { assertEquals(expectedDebugDecision, allocationDeciders.canRebalance(allocation)); assertEquals(expectedDebugDecision, allocationDeciders.canForceAllocatePrimary(shardRouting, routingNode, allocation)); } + + private static ShardRouting createShardRouting(Index index) { + return ShardRouting.newUnassigned( + new ShardId(index, 0), + true, + RecoverySource.ExistingStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "_message") + ); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index dff2509032460..486eb6efc2570 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1226,8 +1226,7 @@ public Environment newEnvironment(Settings settings) { /** Return consistent index settings for the provided index version. */ public static Settings.Builder settings(Version version) { - Settings.Builder builder = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version); - return builder; + return Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version); } /** diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java index 2c0b203088649..11782520a9ffa 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java @@ -145,8 +145,7 @@ public void testNodeReplacementOnlyAllowsShardsFromReplacedNode() throws Excepti internalCluster().startNode(Settings.builder().put("node.name", nodeB)); final String nodeBId = getNodeId(nodeB); - logger.info("--> NodeA: {} -- {}", nodeA, nodeAId); - logger.info("--> NodeB: {} -- {}", nodeB, nodeBId); + logger.info("Started NodeB [{}] to replace NodeA [{}]", nodeBId, nodeAId); assertBusy(() -> { assertIndexPrimaryShardsAreAllocatedOnNode("myindex", nodeBId); From 2f0d9c8342cfe1e232b167b42b170a98b116e896 Mon Sep 17 00:00:00 2001 From: Thomas Decaux Date: Thu, 4 Aug 2022 15:04:28 +0200 Subject: [PATCH 104/265] [DOCS] Fix plugins CLI doc CLI_JAVA_OPTS env var (#89003) The commit https://github.com/elastic/elasticsearch/commit/1d4534f848feb396c00cee09fc1d0aef24a529a2 changes the env variable ``ES_JAVA_OPTS`` to ``CLI_JAVA_OPTS``. Doc must be updated as well. 
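For illustration, the before/after invocation that the doc change below rewrites (a sketch only — the truststore path and plugin URL are the placeholder values already used in the docs, not real endpoints):

```sh
# What the docs showed before this fix (stale since the ES_JAVA_OPTS -> CLI_JAVA_OPTS rename):
sudo ES_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://host/plugin.zip

# What the docs show after this fix:
sudo CLI_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://host/plugin.zip
```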
--- docs/plugins/plugin-script.asciidoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index 6b3eb89a3958e..f04c181152306 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -109,7 +109,7 @@ to a local Java truststore and pass the location to the script as follows: + [source,shell] ----------------------------------- -sudo ES_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://host/plugin.zip +sudo CLI_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://host/plugin.zip ----------------------------------- -- @@ -261,19 +261,19 @@ sudo ES_PATH_CONF=/path/to/conf/dir bin/elasticsearch-plugin install Date: Thu, 4 Aug 2022 09:27:40 -0400 Subject: [PATCH 105/265] [DOCS] Replace ES_JAVA_OPTS with CLI_JAVA_OPTS (#89121) --- docs/reference/index-modules.asciidoc | 2 +- .../setup/advanced-configuration.asciidoc | 16 ++++++++-------- .../setup/important-settings/gc-logging.asciidoc | 4 ++-- docs/reference/setup/install/docker.asciidoc | 10 +++++----- .../setup/install/sysconfig-file.asciidoc | 2 +- .../reference/setup/install/zip-windows.asciidoc | 2 +- docs/reference/setup/sysconfig/swap.asciidoc | 4 ++-- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index e258d8a74cc25..bb1ddbf9a3004 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -39,7 +39,7 @@ specific index module: `index.number_of_shards`:: The number of primary shards that an index should have. Defaults to `1`. This setting can only be set at index creation time. It cannot be changed on a closed index. + -NOTE: The number of shards are limited to `1024` per index. This limitation is a safety limit to prevent accidental creation of indices that can destabilize a cluster due to resource allocation. The limit can be modified by specifying `export ES_JAVA_OPTS="-Des.index.max_number_of_shards=128"` system property on every node that is part of the cluster. +NOTE: The number of shards are limited to `1024` per index. This limitation is a safety limit to prevent accidental creation of indices that can destabilize a cluster due to resource allocation. The limit can be modified by specifying `export CLI_JAVA_OPTS="-Des.index.max_number_of_shards=128"` system property on every node that is part of the cluster. // end::index-number-of-shards-tag[] diff --git a/docs/reference/setup/advanced-configuration.asciidoc b/docs/reference/setup/advanced-configuration.asciidoc index d2b513f49cad9..27dcd6b41dba7 100644 --- a/docs/reference/setup/advanced-configuration.asciidoc +++ b/docs/reference/setup/advanced-configuration.asciidoc @@ -9,7 +9,7 @@ is recommended in most circumstances. ==== Set JVM options If needed, you can override the default JVM options by adding custom options -files (preferred) or setting the `ES_JAVA_OPTS` environment variable. +files (preferred) or setting the `CLI_JAVA_OPTS` environment variable. JVM options files must have the suffix '.options' and contain a line-delimited list of JVM arguments. JVM processes options files in lexicographic order. @@ -70,16 +70,16 @@ as valid JVM arguments are rejected and {es} will fail to start. In production, use JVM options files to override the default settings. 
In testing and development environments, -you can also set JVM options through the `ES_JAVA_OPTS` environment variable. +you can also set JVM options through the `CLI_JAVA_OPTS` environment variable. [source,sh] --------------------------------- -export ES_JAVA_OPTS="$ES_JAVA_OPTS -Djava.io.tmpdir=/path/to/temp/dir" +export CLI_JAVA_OPTS="$CLI_JAVA_OPTS -Djava.io.tmpdir=/path/to/temp/dir" ./bin/elasticsearch --------------------------------- If you're using the RPM or Debian packages, you can specify -`ES_JAVA_OPTS` in the <>. +`CLI_JAVA_OPTS` in the <>. NOTE: {es} ignores the `JAVA_TOOL_OPTIONS` and `JAVA_OPTS` environment variables. @@ -141,16 +141,16 @@ For example, to set the maximum heap size to 2GB, set both `Xms` and `Xmx` to `2 -Xmx2g ------------------ -For testing, you can also set the heap sizes using the `ES_JAVA_OPTS` +For testing, you can also set the heap sizes using the `CLI_JAVA_OPTS` environment variable: [source,sh] ------------------ -ES_JAVA_OPTS="-Xms2g -Xmx2g" ./bin/elasticsearch +CLI_JAVA_OPTS="-Xms2g -Xmx2g" ./bin/elasticsearch ------------------ -The `ES_JAVA_OPTS` variable overrides all other JVM -options. We do not recommend using `ES_JAVA_OPTS` in production. +The `CLI_JAVA_OPTS` variable overrides all other JVM +options. We do not recommend using `CLI_JAVA_OPTS` in production. NOTE: If you are running {es} as a Windows service, you can change the heap size using the service manager. See <>. diff --git a/docs/reference/setup/important-settings/gc-logging.asciidoc b/docs/reference/setup/important-settings/gc-logging.asciidoc index 273ac3ca5baca..d48c70d455261 100644 --- a/docs/reference/setup/important-settings/gc-logging.asciidoc +++ b/docs/reference/setup/important-settings/gc-logging.asciidoc @@ -42,11 +42,11 @@ Change the default GC log output location to `/opt/my-app/gc.log` by Configure an {es} <> to send GC debug logs to standard error (`stderr`). This lets the container orchestrator - handle the output. If using the `ES_JAVA_OPTS` environment variable, + handle the output. If using the `CLI_JAVA_OPTS` environment variable, specify: [source,sh] ---- MY_OPTS="-Xlog:disable -Xlog:all=warning:stderr:utctime,level,tags -Xlog:gc=debug:stderr:utctime" -docker run -e ES_JAVA_OPTS="$MY_OPTS" # etc +docker run -e CLI_JAVA_OPTS="$MY_OPTS" # etc ---- diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 8e2e9b3e16858..45f8487669755 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -192,13 +192,13 @@ endif::[] If you experience issues where the container where your first node is running exits when your second node starts, explicitly set values for the JVM heap size. To <>, include the -`ES_JAVA_OPTS` variable and set values for `-Xms` and `-Xmx` when starting each +`CLI_JAVA_OPTS` variable and set values for `-Xms` and `-Xmx` when starting each node. 
For example, the following command starts node `es02` and sets the minimum and maximum JVM heap size to 1 GB: [source,sh,subs="attributes"] ---- -docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es02 -p 9201:9200 --net elastic -it docker.elastic.co/elasticsearch/elasticsearch:{docker-image} +docker run -e CLI_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es02 -p 9201:9200 --net elastic -it docker.elastic.co/elasticsearch/elasticsearch:{docker-image} ---- ===== Next steps @@ -500,10 +500,10 @@ To manually set the heap size in production, bind mount a <> file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired <> settings. -For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` +For testing, you can also manually set the heap size using the `CLI_JAVA_OPTS` environment variable. For example, to use 16GB, specify `-e -ES_JAVA_OPTS="-Xms16g -Xmx16g"` with `docker run`. The `ES_JAVA_OPTS` variable -overrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in +CLI_JAVA_OPTS="-Xms16g -Xmx16g"` with `docker run`. The `CLI_JAVA_OPTS` variable +overrides all other JVM options. We do not recommend using `CLI_JAVA_OPTS` in production. The `docker-compose.yml` file above sets the heap size to 512MB. diff --git a/docs/reference/setup/install/sysconfig-file.asciidoc b/docs/reference/setup/install/sysconfig-file.asciidoc index 9a1a06ef2f33b..6432f41f7d3e4 100644 --- a/docs/reference/setup/install/sysconfig-file.asciidoc +++ b/docs/reference/setup/install/sysconfig-file.asciidoc @@ -9,7 +9,7 @@ `jvm.options`, and `log4j2.properties` files); defaults to `/etc/elasticsearch`. -`ES_JAVA_OPTS`:: +`CLI_JAVA_OPTS`:: Any additional JVM system properties you may want to apply. diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 95341189c67fa..cc45ac6783151 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -210,7 +210,7 @@ The {es} service can be configured prior to installation by setting the followin `jvm.options`, and `log4j2.properties` files), defaults to `%ES_HOME%\config`. -`ES_JAVA_OPTS`:: +`CLI_JAVA_OPTS`:: Any additional JVM system properties you may want to apply. diff --git a/docs/reference/setup/sysconfig/swap.asciidoc b/docs/reference/setup/sysconfig/swap.asciidoc index 86a9307f68271..1fa04b96d5d57 100644 --- a/docs/reference/setup/sysconfig/swap.asciidoc +++ b/docs/reference/setup/sysconfig/swap.asciidoc @@ -104,11 +104,11 @@ RPM and Debian:: Another possible reason why `mlockall` can fail is that <>. 
This can be solved by specifying -a new temporary directory for JNA using the `ES_JAVA_OPTS` environment variable: +a new temporary directory for JNA using the `CLI_JAVA_OPTS` environment variable: [source,sh] -------------- -export ES_JAVA_OPTS="$ES_JAVA_OPTS -Djna.tmpdir=" +export CLI_JAVA_OPTS="$CLI_JAVA_OPTS -Djna.tmpdir=" ./bin/elasticsearch -------------- From 54af7e005633bdba748459a4f0c2c4695aeb93f3 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Thu, 4 Aug 2022 17:37:21 +0300 Subject: [PATCH 106/265] [ML] Extract method for finding max task memory in autoscaling decider (#89126) --- .../MlAutoscalingDeciderService.java | 95 +++++++++++-------- 1 file changed, 55 insertions(+), 40 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index 83cabd49c79c1..178313f474aa0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -580,44 +580,11 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider ); } - long largestJobOrModel = Math.max( - anomalyDetectionTasks.stream() - .filter(PersistentTask::isAssigned) - // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used - .mapToLong(t -> { - Long mem = this.getAnomalyMemoryRequirement(t); - assert mem != null : "unexpected null for anomaly memory requirement after recent stale check"; - return mem; - }) - .max() - .orElse(0L), - snapshotUpgradeTasks.stream() - .filter(PersistentTask::isAssigned) - // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used - .mapToLong(t -> { - Long mem = this.getAnomalyMemoryRequirement(t); - assert mem != null : "unexpected null for anomaly memory requirement after recent stale check"; - return mem; - }) - .max() - .orElse(0L) - ); - largestJobOrModel = Math.max( - largestJobOrModel, - dataframeAnalyticsTasks.stream() - .filter(PersistentTask::isAssigned) - // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used - .mapToLong(t -> { - Long mem = this.getAnalyticsMemoryRequirement(t); - assert mem != null : "unexpected null for analytics memory requirement after recent stale check"; - return mem; - }) - .max() - .orElse(0L) - ); - largestJobOrModel = Math.max( - largestJobOrModel, - modelAssignments.values().stream().mapToLong(t -> t.getTaskParams().estimateMemoryUsageBytes()).max().orElse(0L) + long maxTaskMemoryBytes = maxMemoryBytes( + anomalyDetectionTasks, + snapshotUpgradeTasks, + dataframeAnalyticsTasks, + modelAssignments.values() ); // This state is invalid, but may occur due to complex bugs that have slipped through testing. @@ -626,7 +593,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider // assignment explanation, for example because some other explanation overrides it. (This second situation // arises because, for example, anomalyDetectionTasks contains a task that is waiting but waitingAnomalyJobs // doesn't because its assignment explanation didn't match AWAITING_LAZY_ASSIGNMENT.) 
- if (largestJobOrModel == 0L) { + if (maxTaskMemoryBytes == 0L) { // We shouldn't need to check this condition because it's the exact opposite of the condition that // would have sent us down the scale down to zero branch higher up this method. assert anomalyDetectionTasks.isEmpty() == false @@ -659,7 +626,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider final Optional maybeScaleDown = checkForScaleDown( nodeLoads, - largestJobOrModel, + maxTaskMemoryBytes, currentScale, reasonBuilder ) @@ -739,6 +706,54 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider ); } + private long maxMemoryBytes( + Collection> anomalyDetectionTasks, + Collection> snapshotUpgradeTasks, + Collection> dataframeAnalyticsTasks, + Collection modelAssignments + ) { + long maxMemoryBytes = Math.max( + anomalyDetectionTasks.stream() + .filter(PersistentTask::isAssigned) + // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used + .mapToLong(t -> { + Long mem = this.getAnomalyMemoryRequirement(t); + assert mem != null : "unexpected null for anomaly memory requirement after recent stale check"; + return mem; + }) + .max() + .orElse(0L), + snapshotUpgradeTasks.stream() + .filter(PersistentTask::isAssigned) + // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used + .mapToLong(t -> { + Long mem = this.getAnomalyMemoryRequirement(t); + assert mem != null : "unexpected null for anomaly memory requirement after recent stale check"; + return mem; + }) + .max() + .orElse(0L) + ); + maxMemoryBytes = Math.max( + maxMemoryBytes, + dataframeAnalyticsTasks.stream() + .filter(PersistentTask::isAssigned) + // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used + .mapToLong(t -> { + Long mem = this.getAnalyticsMemoryRequirement(t); + assert mem != null : "unexpected null for analytics memory requirement after recent stale check"; + return mem; + }) + .max() + .orElse(0L) + ); + maxMemoryBytes = Math.max( + maxMemoryBytes, + modelAssignments.stream().mapToLong(t -> t.getTaskParams().estimateMemoryUsageBytes()).max().orElse(0L) + ); + return maxMemoryBytes; + } + static AutoscalingCapacity ensureScaleDown(AutoscalingCapacity scaleDownResult, AutoscalingCapacity currentCapacity) { if (scaleDownResult == null || currentCapacity == null) { return null; From 63a273d7a3c783ed8ff53b84a93682300ca0af6e Mon Sep 17 00:00:00 2001 From: weizijun Date: Thu, 4 Aug 2022 23:15:30 +0800 Subject: [PATCH 107/265] Fix failing test RollupActionIT.testRollupIndex (#89101) Fixes #88949 --- .../java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java index 7bfe664e686d4..d329e713a3b0e 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java @@ -146,6 +146,7 @@ public void testRollupIndex() throws Exception { String phaseName = randomFrom("warm", "cold"); createNewSingletonPolicy(client(), policy, phaseName, new RollupILMAction(ConfigTestHelpers.randomInterval())); 
updatePolicy(client(), index, policy); + updateClusterSettings(client(), Settings.builder().put("indices.lifecycle.poll_interval", "5s").build()); String rollupIndex = waitAndGetRollupIndexName(client(), index); assertNotNull("Cannot retrieve rollup index name", rollupIndex); From 8f08c7b55bd7b9e950062abc843e33b480ef6a47 Mon Sep 17 00:00:00 2001 From: zhouhui Date: Thu, 4 Aug 2022 23:48:36 +0800 Subject: [PATCH 108/265] Override bulk visit methods of exitable point visitor (#82120) --- docs/changelog/82120.yaml | 5 +++++ .../search/internal/ExitableDirectoryReader.java | 13 +++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 docs/changelog/82120.yaml diff --git a/docs/changelog/82120.yaml b/docs/changelog/82120.yaml new file mode 100644 index 0000000000000..3f69bcd3b2fbb --- /dev/null +++ b/docs/changelog/82120.yaml @@ -0,0 +1,5 @@ +pr: 82120 +summary: Override bulk visit methods of exitable point visitor +area: Search +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java index a72bce0966050..da0319818b487 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java @@ -18,6 +18,7 @@ import org.apache.lucene.index.QueryTimeout; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.suggest.document.CompletionTerms; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.CompiledAutomaton; @@ -346,12 +347,24 @@ public void visit(int docID) throws IOException { in.visit(docID); } + @Override + public void visit(DocIdSetIterator iterator) throws IOException { + checkAndThrowWithSampling(); + in.visit(iterator); + } + @Override public void visit(int docID, byte[] packedValue) throws IOException { checkAndThrowWithSampling(); in.visit(docID, packedValue); } + @Override + public void visit(DocIdSetIterator iterator, byte[] packedValue) throws IOException { + checkAndThrowWithSampling(); + in.visit(iterator, packedValue); + } + @Override public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { queryCancellation.checkCancelled(); From c08111b5b7abd4778fbd3c84bc301ffb21eebbbd Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 4 Aug 2022 17:37:15 +0100 Subject: [PATCH 109/265] Avoid expensive call to Span.fromContextOrNull(null) (#89135) Workaround for #89107 --- .../elasticsearch/tracing/apm/APMTracer.java | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java index d1d7ce113b344..a5d43bb8a6672 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java @@ -286,7 +286,7 @@ private void setSpanAttributes(ThreadContext threadContext, @Nullable Map> { @Override From 188f8872c668645db78356c5f0e71691c44252d7 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Thu, 4 Aug 2022 18:39:04 +0100 Subject: [PATCH 110/265] [ML] ECS Grok patterns in the _text_structure/find_structure endpoint (#88982) Also add support for new CATALINA/TOMCAT timestamp formats used by ECS Grok patterns Relates #77065 
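For illustration, a sketch of how the new query parameter is exercised, modelled on the curl examples in the docs changed below (the log file path is a placeholder; per those docs `ecs_compatibility` accepts `disabled` or `v1` and defaults to `disabled` when omitted):

```sh
# Ask the structure finder to prefer ECS-compliant Grok patterns when it
# composes the grok_pattern in its response.
curl -s -H "Content-Type: application/json" -XPOST \
  "localhost:9200/_text_structure/find_structure?pretty&ecs_compatibility=v1" \
  -T "/path/to/your.log"
```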
Co-authored-by: David Roberts --- .../apis/find-structure.asciidoc | 49 +- .../api/text_structure.find_structure.json | 4 + x-pack/plugin/core/build.gradle | 1 + .../core/src/main/java/module-info.java | 1 + .../action/FindStructureAction.java | 37 + .../structurefinder/TextStructure.java | 43 + .../FindTextStructureActionRequestTests.java | 28 +- .../rest/RestFindStructureAction.java | 1 + .../DelimitedTextStructureFinder.java | 7 +- .../structurefinder/GrokPatternCreator.java | 267 +++- .../LogTextStructureFinder.java | 13 +- .../NdJsonTextStructureFinder.java | 4 +- .../TextStructureOverrides.java | 29 +- .../structurefinder/TextStructureUtils.java | 59 +- .../TimestampFormatFinder.java | 88 +- .../XmlTextStructureFinder.java | 4 +- .../GrokPatternCreatorTests.java | 1203 ++++++++++++----- .../TextStructureUtilsTests.java | 326 +++-- .../TimestampFormatFinderTests.java | 341 +++-- 19 files changed, 1762 insertions(+), 743 deletions(-) diff --git a/docs/reference/text-structure/apis/find-structure.asciidoc b/docs/reference/text-structure/apis/find-structure.asciidoc index fe3cf2fd54803..d9942d8e34b60 100644 --- a/docs/reference/text-structure/apis/find-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-structure.asciidoc @@ -99,6 +99,16 @@ specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +`ecs_compatibility`:: +(Optional, string) The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of +legacy ones when the structure finder creates a Grok pattern. Valid values +are `disabled` and `v1`. The default value is `disabled`. This setting primarily +has an impact when a whole message Grok pattern such as `%{CATALINALOG}` +matches the input. If the structure finder identifies a common structure but +has no idea of meaning then generic field names such as `path`, `ipaddress`, +`field1` and `field2` are used in the `grok_pattern` output, with the intention +that a user who knows the meanings rename these fields before using it. `has_header_row`:: (Optional, Boolean) If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. @@ -286,15 +296,16 @@ If the request does not encounter errors, you receive the following result: "charset" : "UTF-8", <4> "has_byte_order_marker" : false, <5> "format" : "ndjson", <6> - "timestamp_field" : "release_date", <7> - "joda_timestamp_formats" : [ <8> + "ecs_compatibility" : "disabled", <7> + "timestamp_field" : "release_date", <8> + "joda_timestamp_formats" : [ <9> "ISO8601" ], - "java_timestamp_formats" : [ <9> + "java_timestamp_formats" : [ <10> "ISO8601" ], - "need_client_timezone" : true, <10> - "mappings" : { <11> + "need_client_timezone" : true, <11> + "mappings" : { <12> "properties" : { "@timestamp" : { "type" : "date" @@ -328,7 +339,7 @@ If the request does not encounter errors, you receive the following result: } ] }, - "field_stats" : { <12> + "field_stats" : { <13> "author" : { "count" : 24, "cardinality" : 20, @@ -536,19 +547,20 @@ may help diagnose parse errors or accidental uploads of the wrong text. <5> For UTF character encodings, `has_byte_order_marker` indicates whether the text begins with a byte order marker. <6> `format` is one of `ndjson`, `xml`, `delimited` or `semi_structured_text`. 
-<7> The `timestamp_field` names the field considered most likely to be the +<7> `ecs_compatibility` is either `disabled` or `v1`, defaults to `disabled`. +<8> The `timestamp_field` names the field considered most likely to be the primary timestamp of each document. -<8> `joda_timestamp_formats` are used to tell {ls} how to parse timestamps. -<9> `java_timestamp_formats` are the Java time formats recognized in the time +<9> `joda_timestamp_formats` are used to tell {ls} how to parse timestamps. +<10> `java_timestamp_formats` are the Java time formats recognized in the time fields. {es} mappings and ingest pipelines use this format. -<10> If a timestamp format is detected that does not include a timezone, +<11> If a timestamp format is detected that does not include a timezone, `need_client_timezone` will be `true`. The server that parses the text must therefore be told the correct timezone by the client. -<11> `mappings` contains some suitable mappings for an index into which the data +<12> `mappings` contains some suitable mappings for an index into which the data could be ingested. In this case, the `release_date` field has been given a `keyword` type as it is not considered specific enough to convert to the `date` type. -<12> `field_stats` contains the most common values of each field, plus basic +<13> `field_stats` contains the most common values of each field, plus basic numeric statistics for the numeric `page_count` field. This information may provide clues that the data needs to be cleaned or transformed prior to use by other {stack} functionality. @@ -1534,7 +1546,8 @@ This is an example of analyzing an {es} log file: [source,js] ---- -curl -s -H "Content-Type: application/json" -XPOST "localhost:9200/_text_structure/find_structure?pretty" -T "$ES_HOME/logs/elasticsearch.log" +curl -s -H "Content-Type: application/json" -XPOST +"localhost:9200/_text_structure/find_structure?pretty&ecs_compatibility=disabled" -T "$ES_HOME/logs/elasticsearch.log" ---- // NOTCONSOLE // Not converting to console because this shows how curl can be used @@ -1553,6 +1566,7 @@ this: "format" : "semi_structured_text", <1> "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", <2> "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel}.*", <3> + "ecs_compatibility" : "disabled", <4> "timestamp_field" : "timestamp", "joda_timestamp_formats" : [ "ISO8601" @@ -1679,6 +1693,8 @@ in the first line of each multi-line log message. <3> A very simple `grok_pattern` has been created, which extracts the timestamp and recognizable fields that appear in every analyzed message. In this case the only field that was recognized beyond the timestamp was the log level. 
+<4> The ECS Grok pattern compatibility mode used, may be one of either `disabled` +(the default if not specified in the request) or `v1` [discrete] [[find-structure-example-grok]] @@ -1715,6 +1731,7 @@ this: "format" : "semi_structured_text", "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} *\\]\\[%{JAVACLASS:class} *\\] \\[%{HOSTNAME:node}\\] %{JAVALOGMESSAGE:message}", <1> + "ecs_compatibility" : "disabled", <2> "timestamp_field" : "timestamp", "joda_timestamp_formats" : [ "ISO8601" @@ -1769,7 +1786,7 @@ this: } ] }, - "field_stats" : { <2> + "field_stats" : { <3> "class" : { "count" : 53, "cardinality" : 14, @@ -1945,7 +1962,9 @@ this: <1> The `grok_pattern` in the output is now the overridden one supplied in the query parameter. -<2> The returned `field_stats` include entries for the fields from the +<2> The ECS Grok pattern compatibility mode used, may be one of either `disabled` +(the default if not specified in the request) or `v1` +<3> The returned `field_stats` include entries for the fields from the overridden `grok_pattern`. The URL escaping is hard, so if you are working interactively it is best to use diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_structure.json b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_structure.json index a0b8f9965f81a..c244db7aa8351 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_structure.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/text_structure.find_structure.json @@ -74,6 +74,10 @@ "type":"string", "description":"Optional parameter to specify the Grok pattern that should be used to extract fields from messages in a semi-structured text file" }, + "ecs_compatibility":{ + "type":"string", + "description":"Optional parameter to specify the compatibility mode with ECS Grok patterns - may be either 'v1' or 'disabled'" + }, "timestamp_field":{ "type":"string", "description":"Optional parameter to specify the timestamp field in the file" diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index f34278322a5fb..099bcfd389f5b 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -28,6 +28,7 @@ tasks.named("dependencyLicenses").configure { dependencies { compileOnly project(":server") + api project(':libs:elasticsearch-grok') api project(":libs:elasticsearch-ssl-config") api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index 98e9f294f0ce8..b1c8da82a080a 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -8,6 +8,7 @@ module org.elasticsearch.xcore { requires org.elasticsearch.cli; requires org.elasticsearch.base; + requires org.elasticsearch.grok; requires org.elasticsearch.server; requires org.elasticsearch.sslconfig; requires org.elasticsearch.xcontent; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java index e1f4f55ff215b..431a830e5f2be 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.textstructure.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -16,6 +17,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.grok.Grok; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,6 +32,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; public class FindStructureAction extends ActionType { + public static final String ECS_COMPATIBILITY_DISABLED = Grok.ECS_COMPATIBILITY_MODES[0]; + public static final String ECS_COMPATIBILITY_V1 = Grok.ECS_COMPATIBILITY_MODES[1]; public static final FindStructureAction INSTANCE = new FindStructureAction(); public static final String NAME = "cluster:monitor/text_structure/findstructure"; @@ -107,6 +111,8 @@ public static class Request extends ActionRequest { public static final ParseField TIMESTAMP_FORMAT = new ParseField("timestamp_format"); public static final ParseField TIMESTAMP_FIELD = TextStructure.TIMESTAMP_FIELD; + public static final ParseField ECS_COMPATIBILITY = TextStructure.ECS_COMPATIBILITY; + private static final String ARG_INCOMPATIBLE_WITH_FORMAT_TEMPLATE = "[%s] may only be specified if [" + FORMAT.getPreferredName() + "] is [%s]"; @@ -122,6 +128,7 @@ public static class Request extends ActionRequest { private Character quote; private Boolean shouldTrimFields; private String grokPattern; + private String ecsCompatibility; private String timestampFormat; private String timestampField; private BytesReference sample; @@ -141,6 +148,11 @@ public Request(StreamInput in) throws IOException { quote = in.readBoolean() ? (char) in.readVInt() : null; shouldTrimFields = in.readOptionalBoolean(); grokPattern = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + ecsCompatibility = in.readOptionalString(); + } else { + ecsCompatibility = null; + } timestampFormat = in.readOptionalString(); timestampField = in.readOptionalString(); sample = in.readBytesReference(); @@ -262,6 +274,14 @@ public void setGrokPattern(String grokPattern) { this.grokPattern = (grokPattern == null || grokPattern.isEmpty()) ? null : grokPattern; } + public String getEcsCompatibility() { + return ecsCompatibility; + } + + public void setEcsCompatibility(String ecsCompatibility) { + this.ecsCompatibility = (ecsCompatibility == null || ecsCompatibility.isEmpty()) ? 
null : ecsCompatibility; + } + public String getTimestampFormat() { return timestampFormat; } @@ -338,6 +358,18 @@ public ActionRequestValidationException validate() { ); } } + + if (ecsCompatibility != null && Grok.isValidEcsCompatibilityMode(ecsCompatibility) == false) { + validationException = addValidationError( + "[" + + ECS_COMPATIBILITY.getPreferredName() + + "] must be one of [" + + String.join(", ", Grok.ECS_COMPATIBILITY_MODES) + + "] if specified", + validationException + ); + } + if (sample == null || sample.length() == 0) { validationException = addValidationError("sample must be specified", validationException); } @@ -378,6 +410,9 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalBoolean(shouldTrimFields); out.writeOptionalString(grokPattern); + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeOptionalString(ecsCompatibility); + } out.writeOptionalString(timestampFormat); out.writeOptionalString(timestampField); out.writeBytesReference(sample); @@ -395,6 +430,7 @@ public int hashCode() { hasHeaderRow, delimiter, grokPattern, + ecsCompatibility, timestampFormat, timestampField, sample @@ -422,6 +458,7 @@ public boolean equals(Object other) { && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) && Objects.equals(this.delimiter, that.delimiter) && Objects.equals(this.grokPattern, that.grokPattern) + && Objects.equals(this.ecsCompatibility, that.ecsCompatibility) && Objects.equals(this.timestampFormat, that.timestampFormat) && Objects.equals(this.timestampField, that.timestampField) && Objects.equals(this.sample, that.sample); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java index 2fd326bb8fa02..61203339b23c7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java @@ -6,9 +6,11 @@ */ package org.elasticsearch.xpack.core.textstructure.structurefinder; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.grok.Grok; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -84,6 +86,7 @@ public String toString() { public static final ParseField QUOTE = new ParseField("quote"); public static final ParseField SHOULD_TRIM_FIELDS = new ParseField("should_trim_fields"); public static final ParseField GROK_PATTERN = new ParseField("grok_pattern"); + public static final ParseField ECS_COMPATIBILITY = new ParseField("ecs_compatibility"); public static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp_field"); public static final ParseField JODA_TIMESTAMP_FORMATS = new ParseField("joda_timestamp_formats"); public static final ParseField JAVA_TIMESTAMP_FORMATS = new ParseField("java_timestamp_formats"); @@ -110,6 +113,7 @@ public String toString() { PARSER.declareString((p, c) -> p.setQuote(c.charAt(0)), QUOTE); PARSER.declareBoolean(Builder::setShouldTrimFields, SHOULD_TRIM_FIELDS); PARSER.declareString(Builder::setGrokPattern, GROK_PATTERN); + PARSER.declareString(Builder::setEcsCompatibility, 
ECS_COMPATIBILITY); PARSER.declareString(Builder::setTimestampField, TIMESTAMP_FIELD); PARSER.declareStringArray(Builder::setJodaTimestampFormats, JODA_TIMESTAMP_FORMATS); PARSER.declareStringArray(Builder::setJavaTimestampFormats, JAVA_TIMESTAMP_FORMATS); @@ -126,6 +130,10 @@ public String toString() { PARSER.declareStringArray(Builder::setExplanation, EXPLANATION); } + private static String getNonNullEcsCompatibilityString(String ecsCompatibility) { + return (ecsCompatibility == null || ecsCompatibility.isEmpty()) ? Grok.ECS_COMPATIBILITY_MODES[0] : ecsCompatibility; + } + private final int numLinesAnalyzed; private final int numMessagesAnalyzed; private final String sampleStart; @@ -140,6 +148,7 @@ public String toString() { private final Character quote; private final Boolean shouldTrimFields; private final String grokPattern; + private final String ecsCompatibility; private final List jodaTimestampFormats; private final List javaTimestampFormats; private final String timestampField; @@ -164,6 +173,7 @@ public TextStructure( Character quote, Boolean shouldTrimFields, String grokPattern, + String ecsCompatibility, String timestampField, List jodaTimestampFormats, List javaTimestampFormats, @@ -188,6 +198,7 @@ public TextStructure( this.quote = quote; this.shouldTrimFields = shouldTrimFields; this.grokPattern = grokPattern; + this.ecsCompatibility = getNonNullEcsCompatibilityString(ecsCompatibility); this.timestampField = timestampField; this.jodaTimestampFormats = (jodaTimestampFormats == null) ? null : List.copyOf(jodaTimestampFormats); this.javaTimestampFormats = (javaTimestampFormats == null) ? null : List.copyOf(javaTimestampFormats); @@ -213,6 +224,11 @@ public TextStructure(StreamInput in) throws IOException { quote = in.readBoolean() ? (char) in.readVInt() : null; shouldTrimFields = in.readOptionalBoolean(); grokPattern = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + ecsCompatibility = getNonNullEcsCompatibilityString(in.readString()); + } else { + ecsCompatibility = getNonNullEcsCompatibilityString(null); + } jodaTimestampFormats = in.readBoolean() ? Collections.unmodifiableList(in.readStringList()) : null; javaTimestampFormats = in.readBoolean() ? 
Collections.unmodifiableList(in.readStringList()) : null; timestampField = in.readOptionalString(); @@ -254,6 +270,9 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalBoolean(shouldTrimFields); out.writeOptionalString(grokPattern); + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeString(ecsCompatibility); + } if (jodaTimestampFormats == null) { out.writeBoolean(false); } else { @@ -335,6 +354,10 @@ public String getGrokPattern() { return grokPattern; } + public String getEcsCompatibility() { + return ecsCompatibility; + } + public String getTimestampField() { return timestampField; } @@ -403,6 +426,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (grokPattern != null && grokPattern.isEmpty() == false) { builder.field(GROK_PATTERN.getPreferredName(), grokPattern); } + builder.field(ECS_COMPATIBILITY.getPreferredName(), ecsCompatibility); if (timestampField != null && timestampField.isEmpty() == false) { builder.field(TIMESTAMP_FIELD.getPreferredName(), timestampField); } @@ -450,6 +474,7 @@ public int hashCode() { quote, shouldTrimFields, grokPattern, + ecsCompatibility, timestampField, jodaTimestampFormats, javaTimestampFormats, @@ -486,6 +511,7 @@ public boolean equals(Object other) { && Objects.equals(this.quote, that.quote) && Objects.equals(this.shouldTrimFields, that.shouldTrimFields) && Objects.equals(this.grokPattern, that.grokPattern) + && Objects.equals(this.ecsCompatibility, that.ecsCompatibility) && Objects.equals(this.timestampField, that.timestampField) && Objects.equals(this.jodaTimestampFormats, that.jodaTimestampFormats) && Objects.equals(this.javaTimestampFormats, that.javaTimestampFormats) @@ -511,6 +537,7 @@ public static class Builder { private Character quote; private Boolean shouldTrimFields; private String grokPattern; + private String ecsCompatibility; private String timestampField; private List jodaTimestampFormats; private List javaTimestampFormats; @@ -598,6 +625,11 @@ public Builder setGrokPattern(String grokPattern) { return this; } + public Builder setEcsCompatibility(String ecsCompatibility) { + this.ecsCompatibility = ecsCompatibility; + return this; + } + public Builder setTimestampField(String timestampField) { this.timestampField = timestampField; return this; @@ -721,6 +753,16 @@ public TextStructure build() { if (grokPattern == null || grokPattern.isEmpty()) { throw new IllegalArgumentException("Grok pattern must be specified for [" + format + "] structures."); } + if (ecsCompatibility != null + && ecsCompatibility.isEmpty() == false + && Grok.isValidEcsCompatibilityMode(ecsCompatibility) == false) { + throw new IllegalArgumentException( + ECS_COMPATIBILITY.getPreferredName() + + "] must be one of [" + + String.join(", ", Grok.ECS_COMPATIBILITY_MODES) + + "] if specified" + ); + } break; default: throw new IllegalStateException("enum value [" + format + "] missing from switch."); @@ -769,6 +811,7 @@ public TextStructure build() { quote, shouldTrimFields, grokPattern, + ecsCompatibility, timestampField, jodaTimestampFormats, javaTimestampFormats, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionRequestTests.java index 73a9f9caf86cf..99f61ce77c178 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/textstructure/action/FindTextStructureActionRequestTests.java @@ -59,6 +59,9 @@ protected FindStructureAction.Request createTestInstance() { if (randomBoolean()) { request.setGrokPattern(randomAlphaOfLength(80)); } + if (randomBoolean()) { + request.setEcsCompatibility(randomAlphaOfLength(80)); + } } } @@ -139,7 +142,6 @@ public void testValidateNonDelimited() { } public void testValidateNonSemiStructuredText() { - FindStructureAction.Request request = new FindStructureAction.Request(); request.setFormat(randomFrom(TextStructure.Format.NDJSON, TextStructure.Format.XML, TextStructure.Format.DELIMITED)); request.setGrokPattern(randomAlphaOfLength(80)); @@ -151,6 +153,30 @@ public void testValidateNonSemiStructuredText() { assertThat(e.getMessage(), containsString(" [grok_pattern] may only be specified if [format] is [semi_structured_text]")); } + public void testValidateEcsCompatibility() { + FindStructureAction.Request request = new FindStructureAction.Request(); + request.setFormat( + randomFrom( + TextStructure.Format.NDJSON, + TextStructure.Format.XML, + TextStructure.Format.DELIMITED, + TextStructure.Format.SEMI_STRUCTURED_TEXT + ) + ); + String ecsCompatibility = randomAlphaOfLength(80); + request.setEcsCompatibility(ecsCompatibility); + request.setSample(new BytesArray("foo\n")); + + ActionRequestValidationException e = request.validate(); + if (FindStructureAction.ECS_COMPATIBILITY_DISABLED.equalsIgnoreCase(ecsCompatibility) == false) { + assertNotNull(e); + assertThat(e.getMessage(), startsWith("Validation Failed: ")); + assertThat(e.getMessage(), containsString(" [ecs_compatibility] must be one of [disabled, v1] if specified")); + } else { + assertNull(e); + } + } + public void testValidateSample() { FindStructureAction.Request request = new FindStructureAction.Request(); diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java index e8f48787ca93c..e3bd497647c4a 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java @@ -72,6 +72,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient request.setQuote(restRequest.param(FindStructureAction.Request.QUOTE.getPreferredName())); request.setShouldTrimFields(restRequest.paramAsBoolean(FindStructureAction.Request.SHOULD_TRIM_FIELDS.getPreferredName(), null)); request.setGrokPattern(restRequest.param(FindStructureAction.Request.GROK_PATTERN.getPreferredName())); + request.setEcsCompatibility(restRequest.param(FindStructureAction.Request.ECS_COMPATIBILITY.getPreferredName())); request.setTimestampFormat(restRequest.param(FindStructureAction.Request.TIMESTAMP_FORMAT.getPreferredName())); request.setTimestampField(restRequest.param(FindStructureAction.Request.TIMESTAMP_FIELD.getPreferredName())); if (restRequest.hasContent()) { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java 
b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java index c1bee67c8520e..5a752d377c32e 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/DelimitedTextStructureFinder.java @@ -167,6 +167,7 @@ static DelimitedTextStructureFinder makeDelimitedTextStructureFinder( .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) + .setEcsCompatibility(overrides.getEcsCompatibility()) .setIngestPipeline( TextStructureUtils.makeIngestPipelineDefinition( null, @@ -176,7 +177,8 @@ static DelimitedTextStructureFinder makeDelimitedTextStructureFinder( timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone, - timeField.v2().needNanosecondPrecision() + timeField.v2().needNanosecondPrecision(), + overrides.getEcsCompatibility() ) ) .setMultilineStartPattern( @@ -206,7 +208,8 @@ static DelimitedTextStructureFinder makeDelimitedTextStructureFinder( null, null, false, - false + false, + null ) ); structureBuilder.setMultilineStartPattern( diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreator.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreator.java index ab011fa4adbed..7a24f39aae313 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreator.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreator.java @@ -33,6 +33,9 @@ */ public final class GrokPatternCreator { + private static final boolean ECS_COMPATIBILITY_DISABLED = false; + private static final boolean ECS_COMPATIBILITY_ENABLED = true; + private static final Logger logger = LogManager.getLogger(GrokPatternCreator.class); private static final Map PUNCTUATION_OR_SPACE_NEEDS_ESCAPING; static { @@ -51,60 +54,131 @@ public final class GrokPatternCreator { /** * Grok patterns that are designed to match the whole message, not just a part of it. 
*/ - private static final List FULL_MATCH_GROK_PATTERNS = Arrays.asList( - FullMatchGrokPatternCandidate.fromGrokPatternName("BACULA_LOGLINE", "bts"), - FullMatchGrokPatternCandidate.fromGrokPatternName("CATALINALOG", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("COMBINEDAPACHELOG", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("COMMONAPACHELOG", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("ELB_ACCESS_LOG", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("HAPROXYHTTP", "syslog_timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("HAPROXYTCP", "syslog_timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("HTTPD20_ERRORLOG", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("HTTPD24_ERRORLOG", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("NAGIOSLOGLINE", "nagios_epoch"), - FullMatchGrokPatternCandidate.fromGrokPatternName("NETSCREENSESSIONLOG", "date"), - FullMatchGrokPatternCandidate.fromGrokPatternName("RAILS3", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("RUBY_LOGGER", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("SHOREWALL", "timestamp"), - FullMatchGrokPatternCandidate.fromGrokPatternName("TOMCATLOG", "timestamp") + private static final List FULL_MATCH_GROK_PATTERNS_LEGACY = Arrays.asList( + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("BACULA_LOGLINE", "bts"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("CATALINALOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("COMBINEDAPACHELOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("COMMONAPACHELOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("ELB_ACCESS_LOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("HAPROXYHTTP", "syslog_timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("HAPROXYTCP", "syslog_timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("HTTPD20_ERRORLOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("HTTPD24_ERRORLOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("NAGIOSLOGLINE", "nagios_epoch"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("NETSCREENSESSIONLOG", "date"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("RAILS3", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("RUBY_LOGGER", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("SHOREWALL", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameLegacy("TOMCATLOG", "timestamp") + ); + + private static final List FULL_MATCH_GROK_PATTERNS_ECS = Arrays.asList( + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("BACULA_LOGLINE", "bts"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("CATALINALOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("COMBINEDAPACHELOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("COMMONAPACHELOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("ELB_ACCESS_LOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("HAPROXYHTTP", "syslog_timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("HAPROXYTCP", "syslog_timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("HTTPD20_ERRORLOG", "timestamp"), + 
FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("HTTPD24_ERRORLOG", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("NAGIOSLOGLINE", "nagios_epoch"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("NETSCREENSESSIONLOG", "date"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("RAILS3", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("RUBY_LOGGER", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("SHOREWALL", "timestamp"), + FullMatchGrokPatternCandidate.fromGrokPatternNameEcs("TOMCATLOG", "timestamp") ); /** * The first match in this list will be chosen, so it needs to be ordered * such that more generic patterns come after more specific patterns. */ - private static final List ORDERED_CANDIDATE_GROK_PATTERNS = Arrays.asList( - new ValueOnlyGrokPatternCandidate("TOMCAT_DATESTAMP", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("TIMESTAMP_ISO8601", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC822", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC2822", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("DATESTAMP_OTHER", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("DATESTAMP_EVENTLOG", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("HTTPDERROR_DATE", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("SYSLOGTIMESTAMP", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("HTTPDATE", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("CATALINA_DATESTAMP", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("CISCOTIMESTAMP", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("DATESTAMP", "date", "extra_timestamp"), - new ValueOnlyGrokPatternCandidate("LOGLEVEL", "keyword", "loglevel"), - new ValueOnlyGrokPatternCandidate("URI", "keyword", "uri"), - new ValueOnlyGrokPatternCandidate("UUID", "keyword", "uuid"), - new ValueOnlyGrokPatternCandidate("MAC", "keyword", "macaddress"), + private static final List ORDERED_CANDIDATE_GROK_PATTERNS_LEGACY = Arrays.asList( + new ValueOnlyGrokPatternCandidate("TOMCAT_DATESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("TIMESTAMP_ISO8601", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC822", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC2822", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_OTHER", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_EVENTLOG", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("HTTPDERROR_DATE", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("SYSLOGTIMESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("HTTPDATE", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("CATALINA_DATESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("CISCOTIMESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("LOGLEVEL", "keyword", 
"loglevel", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("URI", "keyword", "uri", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("UUID", "keyword", "uuid", ECS_COMPATIBILITY_DISABLED), + new ValueOnlyGrokPatternCandidate("MAC", "keyword", "macaddress", ECS_COMPATIBILITY_DISABLED), // Can't use \b as the breaks, because slashes are not "word" characters - new ValueOnlyGrokPatternCandidate("PATH", "keyword", "path", "(? ORDERED_CANDIDATE_GROK_PATTERNS_ECS = Arrays.asList( + new ValueOnlyGrokPatternCandidate("TOMCAT_DATESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("TIMESTAMP_ISO8601", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC822", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC2822", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_OTHER", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP_EVENTLOG", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("HTTPDERROR_DATE", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("SYSLOGTIMESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("HTTPDATE", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("CATALINA_DATESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("CISCOTIMESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("DATESTAMP", "date", "extra_timestamp", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("LOGLEVEL", "keyword", "log.level", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("URI", "keyword", "url.original", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("UUID", "keyword", "uuid", ECS_COMPATIBILITY_ENABLED), + new ValueOnlyGrokPatternCandidate("MAC", "keyword", "macaddress", ECS_COMPATIBILITY_ENABLED), + // Can't use \b as the breaks, because slashes are not "word" characters + new ValueOnlyGrokPatternCandidate("PATH", "keyword", "path", "(? fieldNameCountStore = new HashMap<>(); private final StringBuilder overallGrokPatternBuilder = new StringBuilder(); private final TimeoutChecker timeoutChecker; + private final boolean ecsCompatibility; /** * @@ -142,6 +217,7 @@ public final class GrokPatternCreator { * @param fieldStats Will be updated with field stats for the fields in the returned pattern, if non-null. * @param customGrokPatternDefinitions Custom Grok pattern definitions to add to the built-in ones. * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @param ecsCompatibility The mode of compatibility with ECS compliant Grok patterns. 
*/ public GrokPatternCreator( List explanation, @@ -149,19 +225,21 @@ public GrokPatternCreator( Map mappings, Map fieldStats, Map customGrokPatternDefinitions, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { this.explanation = Objects.requireNonNull(explanation); this.sampleMessages = Collections.unmodifiableCollection(sampleMessages); this.mappings = mappings; this.fieldStats = fieldStats; if (customGrokPatternDefinitions.isEmpty()) { - grokPatternDefinitions = Grok.getBuiltinPatterns(false); + grokPatternDefinitions = Grok.getBuiltinPatterns(ecsCompatibility); } else { - grokPatternDefinitions = new HashMap<>(Grok.getBuiltinPatterns(false)); + grokPatternDefinitions = new HashMap<>(Grok.getBuiltinPatterns(ecsCompatibility)); grokPatternDefinitions.putAll(customGrokPatternDefinitions); } this.timeoutChecker = Objects.requireNonNull(timeoutChecker); + this.ecsCompatibility = ecsCompatibility; } /** @@ -172,10 +250,10 @@ public GrokPatternCreator( */ public Tuple findFullLineGrokPattern(String timestampField) { - for (FullMatchGrokPatternCandidate candidate : FULL_MATCH_GROK_PATTERNS) { + for (FullMatchGrokPatternCandidate candidate : ecsCompatibility ? FULL_MATCH_GROK_PATTERNS_ECS : FULL_MATCH_GROK_PATTERNS_LEGACY) { if (timestampField == null || timestampField.equals(candidate.getTimeField())) { if (candidate.matchesAll(sampleMessages, timeoutChecker)) { - return candidate.processMatch(explanation, sampleMessages, mappings, fieldStats, timeoutChecker); + return candidate.processMatch(explanation, sampleMessages, mappings, fieldStats, timeoutChecker, ecsCompatibility); } } } @@ -198,7 +276,7 @@ public void validateFullLineGrokPattern(String grokPattern, String timestampFiel grokPatternDefinitions ); if (candidate.matchesAll(sampleMessages, timeoutChecker)) { - candidate.processMatch(explanation, sampleMessages, mappings, fieldStats, timeoutChecker); + candidate.processMatch(explanation, sampleMessages, mappings, fieldStats, timeoutChecker, ecsCompatibility); } else { throw new IllegalArgumentException("Supplied Grok pattern [" + grokPattern + "] does not match sample messages"); } @@ -260,7 +338,8 @@ private void processCandidateAndSplit( epilogues, mappings, fieldStats, - timeoutChecker + timeoutChecker, + ecsCompatibility ); appendBestGrokMatchForStrings(false, prefaces, ignoreKeyValueCandidateLeft, ignoreValueOnlyCandidatesLeft); overallGrokPatternBuilder.append(patternBuilderContent); @@ -288,9 +367,12 @@ void appendBestGrokMatchForStrings( bestCandidate = kvCandidate; } else { ignoreKeyValueCandidate = true; - for (GrokPatternCandidate candidate : ORDERED_CANDIDATE_GROK_PATTERNS.subList( + List orderedCandidateGrokPatterns = ecsCompatibility + ? ORDERED_CANDIDATE_GROK_PATTERNS_ECS + : ORDERED_CANDIDATE_GROK_PATTERNS_LEGACY; + for (GrokPatternCandidate candidate : orderedCandidateGrokPatterns.subList( ignoreValueOnlyCandidates, - ORDERED_CANDIDATE_GROK_PATTERNS.size() + orderedCandidateGrokPatterns.size() )) { if (candidate.matchesAll(snippets)) { bestCandidate = candidate; @@ -518,7 +600,8 @@ String processCaptures( Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ); } @@ -544,15 +627,16 @@ static class ValueOnlyGrokPatternCandidate implements GrokPatternCandidate { * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. * @param mappingType Data type for field in Elasticsearch mappings. 
* @param fieldName Name of the field to extract from the match. + * @param ecsCompatibility The mode of compatibility with ECS compliant Grok patterns. */ - ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName) { + ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, boolean ecsCompatibility) { this( grokPatternName, Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, mappingType), fieldName, "\\b", "\\b", - Grok.getBuiltinPatterns(false) + Grok.getBuiltinPatterns(ecsCompatibility) ); } @@ -584,15 +668,23 @@ static class ValueOnlyGrokPatternCandidate implements GrokPatternCandidate { * @param fieldName Name of the field to extract from the match. * @param preBreak Only consider the match if it's broken from the previous text by this. * @param postBreak Only consider the match if it's broken from the following text by this. + * @param ecsCompatibility The mode of compatibility with ECS compliant Grok patterns. */ - ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, String preBreak, String postBreak) { + ValueOnlyGrokPatternCandidate( + String grokPatternName, + String mappingType, + String fieldName, + String preBreak, + String postBreak, + boolean ecsCompatibility + ) { this( grokPatternName, Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, mappingType), fieldName, preBreak, postBreak, - Grok.getBuiltinPatterns(false) + Grok.getBuiltinPatterns(ecsCompatibility) ); } @@ -655,7 +747,8 @@ public String processCaptures( Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { Collection values = new ArrayList<>(); for (String snippet : snippets) { @@ -673,7 +766,7 @@ public String processCaptures( // If the mapping is type "date" with no format, try to adjust it to include the format if (TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT.equals(adjustedMapping)) { try { - adjustedMapping = TextStructureUtils.findTimestampMapping(explanation, values, timeoutChecker); + adjustedMapping = TextStructureUtils.findTimestampMapping(explanation, values, timeoutChecker, ecsCompatibility); } catch (IllegalArgumentException e) { // This feels like it shouldn't happen, but there may be some obscure edge case // where it does, and in production it will cause less frustration to just return @@ -736,13 +829,14 @@ public String processCaptures( Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { if (fieldName == null) { throw new IllegalStateException("Cannot process KV matches until a field name has been determined"); } Grok grok = new Grok( - Grok.getBuiltinPatterns(false), + Grok.getBuiltinPatterns(ecsCompatibility), "(?m)%{DATA:" + PREFACE + "}\\b" + fieldName + "=%{USER:" + VALUE + "}%{GREEDYDATA:" + EPILOGUE + "}", TimeoutChecker.watchdog, logger::warn @@ -760,7 +854,13 @@ public String processCaptures( timeoutChecker.check("full message Grok pattern field extraction"); } String adjustedFieldName = buildFieldName(fieldNameCountStore, fieldName); - Map mapping = TextStructureUtils.guessScalarMapping(explanation, adjustedFieldName, values, timeoutChecker); + Map mapping = TextStructureUtils.guessScalarMapping( + explanation, + adjustedFieldName, + values, + timeoutChecker, + ecsCompatibility + ); timeoutChecker.check("mapping determination"); if (mappings != null) { 
mappings.put(adjustedFieldName, mapping); @@ -795,9 +895,20 @@ public String processCaptures( Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { - return super.processCaptures(explanation, fieldNameCountStore, snippets, prefaces, epilogues, null, fieldStats, timeoutChecker); + return super.processCaptures( + explanation, + fieldNameCountStore, + snippets, + prefaces, + epilogues, + null, + fieldStats, + timeoutChecker, + ecsCompatibility + ); } } @@ -810,8 +921,20 @@ static class FullMatchGrokPatternCandidate { private final String timeField; private final Grok grok; - static FullMatchGrokPatternCandidate fromGrokPatternName(String grokPatternName, String timeField) { - return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField, Grok.getBuiltinPatterns(false)); + static FullMatchGrokPatternCandidate fromGrokPatternNameLegacy(String grokPatternName, String timeField) { + return new FullMatchGrokPatternCandidate( + "%{" + grokPatternName + "}", + timeField, + Grok.getBuiltinPatterns(ECS_COMPATIBILITY_DISABLED) + ); + } + + static FullMatchGrokPatternCandidate fromGrokPatternNameEcs(String grokPatternName, String timeField) { + return new FullMatchGrokPatternCandidate( + "%{" + grokPatternName + "}", + timeField, + Grok.getBuiltinPatterns(ECS_COMPATIBILITY_ENABLED) + ); } static FullMatchGrokPatternCandidate fromGrokPatternName( @@ -822,8 +945,12 @@ static FullMatchGrokPatternCandidate fromGrokPatternName( return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField, grokPatternDefinitions); } - static FullMatchGrokPatternCandidate fromGrokPattern(String grokPattern, String timeField) { - return new FullMatchGrokPatternCandidate(grokPattern, timeField, Grok.getBuiltinPatterns(false)); + static FullMatchGrokPatternCandidate fromGrokPatternLegacy(String grokPattern, String timeField) { + return new FullMatchGrokPatternCandidate(grokPattern, timeField, Grok.getBuiltinPatterns(ECS_COMPATIBILITY_DISABLED)); + } + + static FullMatchGrokPatternCandidate fromGrokPatternEcs(String grokPattern, String timeField) { + return new FullMatchGrokPatternCandidate(grokPattern, timeField, Grok.getBuiltinPatterns(ECS_COMPATIBILITY_ENABLED)); } static FullMatchGrokPatternCandidate fromGrokPattern( @@ -863,7 +990,8 @@ public Tuple processMatch( Collection sampleMessages, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { if (grokPattern.startsWith("%{") && grokPattern.endsWith("}")) { @@ -905,7 +1033,8 @@ public Tuple processMatch( explanation, fieldName, valuesForField.getValue(), - timeoutChecker + timeoutChecker, + ecsCompatibility ); timeoutChecker.check("mapping determination"); // Exclude the time field because that will be dropped and replaced with @timestamp diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java index 0f7d4adb2b4f5..f61bc06105e2b 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/LogTextStructureFinder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.textstructure.structurefinder; 
import org.elasticsearch.core.Tuple; +import org.elasticsearch.grok.Grok; import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction; import org.elasticsearch.xpack.core.textstructure.structurefinder.FieldStats; import org.elasticsearch.xpack.core.textstructure.structurefinder.TextStructure; @@ -146,14 +147,17 @@ static LogTextStructureFinder makeLogTextStructureFinder( fieldStats.put("message", TextStructureUtils.calculateFieldStats(messageMapping, sampleMessages, timeoutChecker)); Map customGrokPatternDefinitions = timestampFormatFinder.getCustomGrokPatternDefinitions(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator( explanation, sampleMessages, fieldMappings, fieldStats, customGrokPatternDefinitions, - timeoutChecker + timeoutChecker, + Grok.ECS_COMPATIBILITY_MODES[1].equals(overrides.getEcsCompatibility()) ); + // We can't parse directly into @timestamp using Grok, so parse to some other time field, which the date filter will then remove String interimTimestampField = overrides.getTimestampField(); String grokPattern = overrides.getGrokPattern(); @@ -191,6 +195,7 @@ static LogTextStructureFinder makeLogTextStructureFinder( .setJavaTimestampFormats(timestampFormatFinder.getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) .setGrokPattern(grokPattern) + .setEcsCompatibility(overrides.getEcsCompatibility()) .setIngestPipeline( TextStructureUtils.makeIngestPipelineDefinition( grokPattern, @@ -200,7 +205,8 @@ static LogTextStructureFinder makeLogTextStructureFinder( interimTimestampField, timestampFormatFinder.getJavaTimestampFormats(), needClientTimeZone, - timestampFormatFinder.needNanosecondPrecision() + timestampFormatFinder.needNanosecondPrecision(), + overrides.getEcsCompatibility() ) ) .setMappings(Collections.singletonMap(TextStructureUtils.MAPPING_PROPERTIES_SETTING, fieldMappings)) @@ -238,7 +244,8 @@ static TimestampFormatFinder populateTimestampFormatFinder( false, false, false, - timeoutChecker + timeoutChecker, + Grok.ECS_COMPATIBILITY_MODES[1].equals(overrides.getEcsCompatibility()) ); for (String sampleLine : sampleLines) { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java index 9cb3d64300fcf..eded7e6e5493c 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java @@ -68,6 +68,7 @@ static NdJsonTextStructureFinder makeNdJsonTextStructureFinder( .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) + .setEcsCompatibility(overrides.getEcsCompatibility()) .setIngestPipeline( TextStructureUtils.makeIngestPipelineDefinition( null, @@ -79,7 +80,8 @@ static NdJsonTextStructureFinder makeNdJsonTextStructureFinder( timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone, - timeField.v2().needNanosecondPrecision() + timeField.v2().needNanosecondPrecision(), + overrides.getEcsCompatibility() ) ); } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java 
b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java index 4d95d302b4a14..5ba4e464508f1 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureOverrides.java @@ -35,6 +35,8 @@ public class TextStructureOverrides { private final String timestampFormat; private final String timestampField; + private final String ecsCompatibility; + public TextStructureOverrides(FindStructureAction.Request request) { this( @@ -47,7 +49,8 @@ public TextStructureOverrides(FindStructureAction.Request request) { request.getShouldTrimFields(), request.getGrokPattern(), request.getTimestampFormat(), - request.getTimestampField() + request.getTimestampField(), + request.getEcsCompatibility() ); } @@ -61,7 +64,8 @@ private TextStructureOverrides( Boolean shouldTrimFields, String grokPattern, String timestampFormat, - String timestampField + String timestampField, + String ecsCompatibility ) { this.charset = charset; this.format = format; @@ -73,6 +77,7 @@ private TextStructureOverrides( this.grokPattern = grokPattern; this.timestampFormat = timestampFormat; this.timestampField = timestampField; + this.ecsCompatibility = ecsCompatibility; } public static Builder builder() { @@ -119,6 +124,10 @@ public String getTimestampField() { return timestampField; } + public String getEcsCompatibility() { + return ecsCompatibility; + } + @Override public int hashCode() { @@ -132,7 +141,8 @@ public int hashCode() { shouldTrimFields, grokPattern, timestampFormat, - timestampField + timestampField, + ecsCompatibility ); } @@ -157,7 +167,8 @@ public boolean equals(Object other) { && Objects.equals(this.shouldTrimFields, that.shouldTrimFields) && Objects.equals(this.grokPattern, that.grokPattern) && Objects.equals(this.timestampFormat, that.timestampFormat) - && Objects.equals(this.timestampField, that.timestampField); + && Objects.equals(this.timestampField, that.timestampField) + && Objects.equals(this.ecsCompatibility, that.ecsCompatibility); } public static class Builder { @@ -173,6 +184,8 @@ public static class Builder { private String timestampFormat; private String timestampField; + private String ecsCompatibility; + public Builder setCharset(String charset) { this.charset = charset; return this; @@ -223,6 +236,11 @@ public Builder setTimestampField(String timestampField) { return this; } + public Builder setEcsCompatibility(String ecsCompatibility) { + this.ecsCompatibility = ecsCompatibility; + return this; + } + public TextStructureOverrides build() { return new TextStructureOverrides( @@ -235,7 +253,8 @@ public TextStructureOverrides build() { shouldTrimFields, grokPattern, timestampFormat, - timestampField + timestampField, + ecsCompatibility ); } } diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtils.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtils.java index 9ae68e654cbcb..b6065cecf93cb 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtils.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtils.java @@ -29,6 +29,9 @@ public final class TextStructureUtils { + // 
The ECS Grok pattern compatibility mode to use when no ecs_compatibility parameter is specified in the request. + private static final boolean DEFAULT_ECS_COMPATIBILITY = false; + private static final Logger logger = LogManager.getLogger(TextStructureUtils.class); public static final String DEFAULT_TIMESTAMP_FIELD = "@timestamp"; public static final String MAPPING_TYPE_SETTING = "type"; @@ -218,7 +221,8 @@ private static List> findCandidates( true, true, true, - timeoutChecker + timeoutChecker, + Grok.ECS_COMPATIBILITY_MODES[1].equals(overrides.getEcsCompatibility()) ); try { timestampFormatFinder.addSample(value.toString()); @@ -259,6 +263,24 @@ static Tuple, SortedMap> guessMapp List> sampleRecords, TimeoutChecker timeoutChecker ) { + return guessMappingsAndCalculateFieldStats(explanation, sampleRecords, timeoutChecker, DEFAULT_ECS_COMPATIBILITY); + } + + /** + * Given the sampled records, guess appropriate Elasticsearch mappings. + * @param explanation List of reasons for making decisions. May contain items when passed and new reasons + * can be appended by this method. + * @param sampleRecords The sampled records. + * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @param ecsCompatibility The mode of compatibility with ECS Grok patterns + * @return A map of field name to mapping settings. + */ + static Tuple, SortedMap> guessMappingsAndCalculateFieldStats( + List explanation, + List> sampleRecords, + TimeoutChecker timeoutChecker, + boolean ecsCompatibility + ) { SortedMap mappings = new TreeMap<>(); SortedMap fieldStats = new TreeMap<>(); @@ -276,7 +298,8 @@ static Tuple, SortedMap> guessMapp explanation, fieldName, fieldValues, - timeoutChecker + timeoutChecker, + ecsCompatibility ); if (mappingAndFieldStats != null) { if (mappingAndFieldStats.v1() != null) { @@ -295,7 +318,8 @@ static Tuple, FieldStats> guessMappingAndCalculateFieldStats List explanation, String fieldName, List fieldValues, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { if (fieldValues == null || fieldValues.isEmpty()) { // We can get here if all the records that contained a given field had a null value for it. 
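
// Illustrative sketch, not part of this patch: the boolean flag threaded through these mapping
// helpers is derived from the request-level ecs_compatibility string the same way the structure
// finders do it. The class name is hypothetical; the same package is assumed because the utility
// methods are package-private.
package org.elasticsearch.xpack.textstructure.structurefinder;

import org.elasticsearch.grok.Grok;

class EcsModeSketch {
    // Grok.ECS_COMPATIBILITY_MODES is {"disabled", "v1"}, so index 1 means "use ECS Grok patterns".
    static boolean useEcsGrokPatterns(TextStructureOverrides overrides) {
        return Grok.ECS_COMPATIBILITY_MODES[1].equals(overrides.getEcsCompatibility());
    }
}
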
@@ -318,12 +342,13 @@ static Tuple, FieldStats> guessMappingAndCalculateFieldStats explanation, fieldName, fieldValues.stream().flatMap(TextStructureUtils::flatten).collect(Collectors.toList()), - timeoutChecker + timeoutChecker, + ecsCompatibility ); } Collection fieldValuesAsStrings = fieldValues.stream().map(Object::toString).collect(Collectors.toList()); - Map mapping = guessScalarMapping(explanation, fieldName, fieldValuesAsStrings, timeoutChecker); + Map mapping = guessScalarMapping(explanation, fieldName, fieldValuesAsStrings, timeoutChecker, ecsCompatibility); timeoutChecker.check("mapping determination"); return new Tuple<>(mapping, calculateFieldStats(mapping, fieldValuesAsStrings, timeoutChecker)); } @@ -355,11 +380,19 @@ private static Stream flatten(Object value) { static Map findTimestampMapping( List explanation, Collection fieldValues, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { assert fieldValues.isEmpty() == false; - TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, timeoutChecker); + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder( + explanation, + true, + true, + true, + timeoutChecker, + ecsCompatibility + ); fieldValues.forEach(timestampFormatFinder::addSample); return timestampFormatFinder.getEsDateMappingTypeWithFormat(); } @@ -382,7 +415,8 @@ static Map guessScalarMapping( List explanation, String fieldName, Collection fieldValues, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { assert fieldValues.isEmpty() == false; @@ -391,7 +425,7 @@ static Map guessScalarMapping( } try { - return findTimestampMapping(explanation, fieldValues, timeoutChecker); + return findTimestampMapping(explanation, fieldValues, timeoutChecker, ecsCompatibility); } catch (IllegalArgumentException e) { // To be mapped as type "date" all the values must match the same timestamp format - if // they don't we'll end up here, and move on to try other possible mappings @@ -475,7 +509,8 @@ public static Map makeIngestPipelineDefinition( String timestampField, List timestampFormats, boolean needClientTimezone, - boolean needNanosecondPrecision + boolean needNanosecondPrecision, + String ecsCompatibility ) { if (grokPattern == null && csvProcessorSettings == null && timestampField == null) { @@ -494,6 +529,10 @@ public static Map makeIngestPipelineDefinition( if (customGrokPatternDefinitions.isEmpty() == false) { grokProcessorSettings.put("pattern_definitions", customGrokPatternDefinitions); } + grokProcessorSettings.put( + "ecs_compatibility", + (ecsCompatibility == null || ecsCompatibility.isEmpty()) ? 
Grok.ECS_COMPATIBILITY_MODES[0] : ecsCompatibility + ); processors.add(Collections.singletonMap("grok", grokProcessorSettings)); } else { assert customGrokPatternDefinitions.isEmpty(); diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java index 1c7da0fa83b37..93d7db489f905 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java @@ -48,6 +48,7 @@ */ public final class TimestampFormatFinder { + private static final boolean ECS_COMPATIBILITY = false; private static final String PREFACE = "preface"; private static final String EPILOGUE = "epilogue"; @@ -288,6 +289,50 @@ public final class TimestampFormatFinder { ) ); + /** + * The first match in this list will be chosen, so it needs to be ordered + * such that more generic patterns come after more specific patterns. + */ + static final List ORDERED_CANDIDATE_FORMATS_ECS_V1; + static { + List items = new ArrayList<>(); + // CATALINA8_DATESTAMP %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} + // Where SECOND is defined as (?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) + // ('60' is a leap second in most time standards and thus is valid.) + // 29-Aug-2021 12:03:33.578 + items.add( + new CandidateTimestampFormat( + example -> Collections.singletonList( + CandidateTimestampFormat.adjustFractionalSecondsFromEndOfExample(example, "dd-MMM-yyyy hh:mm:ss") + ), + "\\b\\d{2}-[A-Z]\\S{2}-\\d{4} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", + "\\b%{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND}\\b", + "CATALINA8_DATESTAMP", + "11 1111 11 11 11 111", + 0, + 0 + ) + ); + // CATALINA7_DATESTAMP %{MONTH} %{MONTHDAY}, %{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} (?:AM|PM) + items.add( + new CandidateTimestampFormat( + example -> Collections.singletonList("MMM dd, yyyy h:mm:ss a"), + "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", + "\\b%{MONTH} %{MONTHDAY}, %{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} (?:AM|PM)\\b", + "CATALINA7_DATESTAMP", + Arrays.asList(" 11 1111 1 11 11", " 11 1111 11 11 11"), + 0, + 3 + ) + ); + items.addAll( + ORDERED_CANDIDATE_FORMATS.stream() + .filter(p -> "CATALINA_DATESTAMP".equals(p.outputGrokPatternName) == false) + .collect(Collectors.toList()) + ); + ORDERED_CANDIDATE_FORMATS_ECS_V1 = Collections.unmodifiableList(items); + } + /** * It is expected that the explanation will be shared with other code. * Both this class and other classes will update it. @@ -304,6 +349,27 @@ public final class TimestampFormatFinder { private List matchedFormats; private List cachedJavaTimestampFormats; + /** + * Construct without any specific timestamp format override. + * @param explanation List of reasons for making decisions. May contain items when passed and new reasons + * can be appended by the methods of this class. + * @param requireFullMatch Must samples added to this object represent a timestamp in their entirety? + * @param errorOnNoTimestamp Should an exception be thrown if a sample is added that does not contain a recognised timestamp? + * @param errorOnMultiplePatterns Should an exception be thrown if samples are uploaded that require different Grok patterns? 
+ * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @param ecsCompatibility Mode of compatibility with ECS compliant Grok patterns. + */ + public TimestampFormatFinder( + List explanation, + boolean requireFullMatch, + boolean errorOnNoTimestamp, + boolean errorOnMultiplePatterns, + TimeoutChecker timeoutChecker, + boolean ecsCompatibility + ) { + this(explanation, null, requireFullMatch, errorOnNoTimestamp, errorOnMultiplePatterns, timeoutChecker, ecsCompatibility); + } + /** * Construct without any specific timestamp format override. * @param explanation List of reasons for making decisions. May contain items when passed and new reasons @@ -320,7 +386,7 @@ public TimestampFormatFinder( boolean errorOnMultiplePatterns, TimeoutChecker timeoutChecker ) { - this(explanation, null, requireFullMatch, errorOnNoTimestamp, errorOnMultiplePatterns, timeoutChecker); + this(explanation, null, requireFullMatch, errorOnNoTimestamp, errorOnMultiplePatterns, timeoutChecker, ECS_COMPATIBILITY); } /** @@ -335,6 +401,7 @@ public TimestampFormatFinder( * @param errorOnNoTimestamp Should an exception be thrown if a sample is added that does not contain a recognised timestamp? * @param errorOnMultiplePatterns Should an exception be thrown if samples are uploaded that require different Grok patterns? * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @param ecsCompatibility Mode of compatibility with ECS compliant Grok patterns. */ public TimestampFormatFinder( List explanation, @@ -342,14 +409,16 @@ public TimestampFormatFinder( boolean requireFullMatch, boolean errorOnNoTimestamp, boolean errorOnMultiplePatterns, - TimeoutChecker timeoutChecker + TimeoutChecker timeoutChecker, + boolean ecsCompatibility ) { this.explanation = Objects.requireNonNull(explanation); this.requireFullMatch = requireFullMatch; this.errorOnNoTimestamp = errorOnNoTimestamp; this.errorOnMultiplePatterns = errorOnMultiplePatterns; this.orderedCandidateFormats = (overrideFormat != null) - ? Collections.singletonList(makeCandidateFromOverrideFormat(overrideFormat, timeoutChecker)) + ? Collections.singletonList(makeCandidateFromOverrideFormat(overrideFormat, timeoutChecker, ecsCompatibility)) + : ecsCompatibility ? ORDERED_CANDIDATE_FORMATS_ECS_V1 : ORDERED_CANDIDATE_FORMATS; this.timeoutChecker = Objects.requireNonNull(timeoutChecker); this.matches = new ArrayList<>(); @@ -445,11 +514,16 @@ static Tuple overrideFormatToGrokAndRegex(String overrideFormat) /** * Given a user supplied Java timestamp format, return an appropriate candidate timestamp object as required by this class. * The returned candidate might be a built-in one, or might be generated from the supplied format. - * @param overrideFormat A user supplied Java timestamp format. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @param overrideFormat A user supplied Java timestamp format. + * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @param ecsCompatibility Mode of compatibility with ECS compliant Grok patterns. * @return An appropriate candidate timestamp object. 
*/ - static CandidateTimestampFormat makeCandidateFromOverrideFormat(String overrideFormat, TimeoutChecker timeoutChecker) { + static CandidateTimestampFormat makeCandidateFromOverrideFormat( + String overrideFormat, + TimeoutChecker timeoutChecker, + boolean ecsCompatibility + ) { // First check for a special format string switch (overrideFormat.toUpperCase(Locale.ROOT)) { @@ -476,7 +550,7 @@ static CandidateTimestampFormat makeCandidateFromOverrideFormat(String overrideF String generatedTimestamp = javaTimeFormatter.withZone(ZoneOffset.ofHoursMinutesSeconds(5, 45, 0)) .format(Instant.ofEpochMilli(981173106123L).plusNanos(456789L)); BitSet numberPosBitSet = stringToNumberPosBitSet(generatedTimestamp); - for (CandidateTimestampFormat candidate : ORDERED_CANDIDATE_FORMATS) { + for (CandidateTimestampFormat candidate : ecsCompatibility ? ORDERED_CANDIDATE_FORMATS_ECS_V1 : ORDERED_CANDIDATE_FORMATS) { TimestampMatch match = checkCandidate(candidate, generatedTimestamp, numberPosBitSet, true, timeoutChecker); if (match != null) { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinder.java index ab9c4c1d0ef97..cee90c5c5b53b 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinder.java @@ -111,6 +111,7 @@ static XmlTextStructureFinder makeXmlTextStructureFinder( .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) + .setEcsCompatibility(overrides.getEcsCompatibility()) .setIngestPipeline( TextStructureUtils.makeIngestPipelineDefinition( null, @@ -120,7 +121,8 @@ static XmlTextStructureFinder makeXmlTextStructureFinder( topLevelTag + "." 
+ timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone, - timeField.v2().needNanosecondPrecision() + timeField.v2().needNanosecondPrecision(), + overrides.getEcsCompatibility() ) ); } diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreatorTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreatorTests.java index 58ca141c21215..e873390719d20 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreatorTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/GrokPatternCreatorTests.java @@ -22,6 +22,9 @@ public class GrokPatternCreatorTests extends TextStructureTestCase { + private static final boolean ECS_COMPATIBILITY_V1 = true; + private static final boolean ECS_COMPATIBILITY_DISABLED = false; + public void testBuildFieldName() { Map fieldNameCountStore = new HashMap<>(); assertEquals("field", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); @@ -42,31 +45,66 @@ public void testPopulatePrefacesAndEpiloguesGivenTimestamp() { "junk [2018-01-22T07:33:23] INFO ", "[2018-01-21T03:33:23] DEBUG " ); - ValueOnlyGrokPatternCandidate candidate = new ValueOnlyGrokPatternCandidate("TIMESTAMP_ISO8601", "date", "extra_timestamp"); - - Map fieldNameCountStore = new HashMap<>(); - Collection prefaces = new ArrayList<>(); - Collection epilogues = new ArrayList<>(); - candidate.processCaptures(explanation, fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); - - assertThat(prefaces, containsInAnyOrder("[", "[", "junk [", "[")); - assertThat(epilogues, containsInAnyOrder("] DEBUG ", "] ERROR ", "] INFO ", "] DEBUG ")); + for (boolean ECS_COMPATIBILITY_MODE : Arrays.asList(ECS_COMPATIBILITY_DISABLED, ECS_COMPATIBILITY_V1)) { + ValueOnlyGrokPatternCandidate candidate = new ValueOnlyGrokPatternCandidate( + "TIMESTAMP_ISO8601", + "date", + "extra_timestamp", + ECS_COMPATIBILITY_MODE + ); + + Map fieldNameCountStore = new HashMap<>(); + Collection prefaces = new ArrayList<>(); + Collection epilogues = new ArrayList<>(); + candidate.processCaptures( + explanation, + fieldNameCountStore, + matchingStrings, + prefaces, + epilogues, + null, + null, + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_MODE + ); + + assertThat(prefaces, containsInAnyOrder("[", "[", "junk [", "[")); + assertThat(epilogues, containsInAnyOrder("] DEBUG ", "] ERROR ", "] INFO ", "] DEBUG ")); + } } public void testPopulatePrefacesAndEpiloguesGivenEmailAddress() { Collection matchingStrings = Arrays.asList("before alice@acme.com after", "abc bob@acme.com xyz", "carol@acme.com"); - ValueOnlyGrokPatternCandidate candidate = new ValueOnlyGrokPatternCandidate("EMAILADDRESS", "keyword", "email"); - Map fieldNameCountStore = new HashMap<>(); - Collection prefaces = new ArrayList<>(); - Collection epilogues = new ArrayList<>(); - - candidate.processCaptures(explanation, fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); - - assertThat(prefaces, containsInAnyOrder("before ", "abc ", "")); - assertThat(epilogues, containsInAnyOrder(" after", " xyz", "")); + for (boolean ECS_COMPATIBILITY_MODE : Arrays.asList(ECS_COMPATIBILITY_DISABLED, ECS_COMPATIBILITY_V1)) { + ValueOnlyGrokPatternCandidate candidate = new ValueOnlyGrokPatternCandidate( + "EMAILADDRESS", + "keyword", + 
"email", + ECS_COMPATIBILITY_MODE + ); + + Map fieldNameCountStore = new HashMap<>(); + Collection prefaces = new ArrayList<>(); + Collection epilogues = new ArrayList<>(); + + candidate.processCaptures( + explanation, + fieldNameCountStore, + matchingStrings, + prefaces, + epilogues, + null, + null, + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_MODE + ); + + assertThat(prefaces, containsInAnyOrder("before ", "abc ", "")); + assertThat(epilogues, containsInAnyOrder(" after", " xyz", "")); + } } public void testAppendBestGrokMatchForStringsGivenTimestampsAndLogLevels() { @@ -78,107 +116,149 @@ public void testAppendBestGrokMatchForStringsGivenTimestampsAndLogLevels() { "[2018-01-21T03:33:23] DEBUG " ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); - - assertEquals( - ".*?\\[%{TIMESTAMP_ISO8601:extra_timestamp}\\] %{LOGLEVEL:loglevel} ", - grokPatternCreator.getOverallGrokPatternBuilder().toString() - ); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + String[] expectedLoglevel = new String[] { "loglevel", "log.level" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + String loglevel = expectedLoglevel[index]; + assertEquals( + ".*?\\[%{TIMESTAMP_ISO8601:extra_timestamp}\\] %{LOGLEVEL:" + loglevel + "} ", + grokPatternCreator.getOverallGrokPatternBuilder().toString() + ); + + ++index; + } } public void testAppendBestGrokMatchForStringsGivenNumbersInBrackets() { Collection snippets = Arrays.asList("(-2)", " (-3)", " (4)", " (-5) "); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); - - assertEquals(".*?\\(%{INT:field}\\).*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + assertEquals(".*?\\(%{INT:field}\\).*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + } } public void testAppendBestGrokMatchForStringsGivenNegativeNumbersWithoutBreak() { Collection snippets = Arrays.asList("before-2 ", "prior to-3", "-4"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); - - // It seems sensible that we don't detect these suffices as either base 10 or base 16 numbers - 
assertEquals(".*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + // It seems sensible that we don't detect these suffices as either base 10 or base 16 numbers + assertEquals(".*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + } } public void testAppendBestGrokMatchForStringsGivenHexNumbers() { Collection snippets = Arrays.asList(" abc", " 123", " -123", "1f is hex"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); - - assertEquals(".*?%{BASE16NUM:field}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + assertEquals(".*?%{BASE16NUM:field}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + } } public void testAppendBestGrokMatchForStringsGivenHostnamesWithNumbers() { Collection snippets = Arrays.asList(" snippets = Arrays.asList("before alice@acme.com after", "abc bob@acme.com xyz", "carol@acme.com"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); - - assertEquals(".*?%{EMAILADDRESS:email}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + assertEquals(".*?%{EMAILADDRESS:email}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + } } public void testAppendBestGrokMatchForStringsGivenUris() { @@ -189,34 +269,52 @@ public void testAppendBestGrokMatchForStringsGivenUris() { "download today from https://www.elastic.co/downloads" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + GrokPatternCreator[] 
grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; - assertEquals(".*?%{URI:uri}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + String[] expectedUris = new String[] { "uri", "url.original" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + String expectedUri = expectedUris[index]; + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + assertEquals(".*?%{URI:" + expectedUri + "}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + + ++index; + } } public void testAppendBestGrokMatchForStringsGivenPaths() { Collection snippets = Arrays.asList("on Mac /Users/dave", "on Windows C:\\Users\\dave", "on Linux /home/dave"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); - - assertEquals(".*? .*? %{PATH:path}", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + assertEquals(".*? .*? 
%{PATH:path}", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + } } public void testAppendBestGrokMatchForStringsGivenKvPairs() { @@ -228,17 +326,23 @@ public void testAppendBestGrokMatchForStringsGivenKvPairs() { " foo=1 bar=a " ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); - - assertEquals(".*?\\bfoo=%{USER:foo} .*?\\bbar=%{USER:bar}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); + + assertEquals(".*?\\bfoo=%{USER:foo} .*?\\bbar=%{USER:bar}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + } } public void testCreateGrokPatternFromExamplesGivenNamedLogs() { @@ -251,26 +355,52 @@ public void testCreateGrokPatternFromExamplesGivenNamedLogs() { ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - - assertEquals( - "%{SYSLOGTIMESTAMP:timestamp} .*? .*?\\[%{INT:field}\\]: %{LOGLEVEL:loglevel} \\(.*? .*? .*?\\) .*? " - + "%{QUOTEDSTRING:field2}: %{IP:ipaddress}#%{INT:field3}", - grokPatternCreator.createGrokPatternFromExamples("SYSLOGTIMESTAMP", TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, "timestamp") - ); - assertEquals(5, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("field2")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field3")); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + String[] expectedLoglevel = new String[] { "loglevel", "log.level" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + String loglevel = expectedLoglevel[index]; + assertEquals( + "%{SYSLOGTIMESTAMP:timestamp} .*? .*?\\[%{INT:field}\\]: %{LOGLEVEL:" + + loglevel + + "} \\(.*? .*? .*?\\) .*? 
" + + "%{QUOTEDSTRING:field2}: %{IP:ipaddress}#%{INT:field3}", + grokPatternCreator.createGrokPatternFromExamples( + "SYSLOGTIMESTAMP", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get(loglevel)); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("field2")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field3")); + + mappings.clear(); + ++index; + } } public void testCreateGrokPatternFromExamplesGivenCatalinaLogs() { @@ -287,25 +417,168 @@ public void testCreateGrokPatternFromExamplesGivenCatalinaLogs() { ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + String[] expectedLoglevel = new String[] { "loglevel", "log.level" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + String loglevel = expectedLoglevel[index]; + assertEquals( + "%{CATALINA_DATESTAMP:timestamp} .*? .*?\\n%{LOGLEVEL:" + loglevel + "}: .*", + grokPatternCreator.createGrokPatternFromExamples( + "CATALINA_DATESTAMP", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(1, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get(loglevel)); + + mappings.clear(); + ++index; + } + + { + // ECS compatible Grok also supports the new CATALINA7_DATESTAMP pattern name + GrokPatternCreator grokPatternCreator = new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ); + assertEquals( + "%{CATALINA7_DATESTAMP:timestamp} .*? .*?\\n%{LOGLEVEL:log.level}: .*", + grokPatternCreator.createGrokPatternFromExamples( + "CATALINA7_DATESTAMP", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(1, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("log.level")); + } + + } - assertEquals( - "%{CATALINA_DATESTAMP:timestamp} .*? 
.*?\\n%{LOGLEVEL:loglevel}: .*", - grokPatternCreator.createGrokPatternFromExamples( - "CATALINA_DATESTAMP", - TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, - "timestamp" - ) + public void testCreateGrokPatternFromExamplesGivenCatalina8Logs() { + + // Tomcat 8.5/9.0 has new datestamp and logging formats - these are only supported by ECS compatible Grok patterns, i.e.: + // CATALINA8_DATESTAMP %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} + // CATALINA8_LOG %{CATALINA8_DATESTAMP:timestamp} %{LOGLEVEL:log.level} \[%{DATA:java.log.origin.thread.name}\] + // %{JAVACLASS:java.log.origin.class.name}\.(?:%{JAVAMETHOD:log.origin.function})? %{JAVALOGMESSAGE:message} + // CATALINA_DATESTAMP (?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP}) + // CATALINALOG (?:%{CATALINA8_LOG})|(?:%{CATALINA7_LOG}) + Collection sampleMessages = Arrays.asList( + "29-Aug-2009 12:03:33.123 WARNING [main] org.apache.tomcat.util.http.Parameters.processParameters Parameters: " + + "Invalid chunk ignored.", + "29-Aug-2009 12:03:40.234 WARNING [main] org.apache.tomcat.util.http.Parameters.processParameters Parameters: " + + "Invalid chunk ignored.", + "29-Aug-2009 12:03:45.456 WARNING [main] org.apache.tomcat.util.http.Parameters.processParameters Parameters: " + + "Invalid chunk ignored.", + "29-Aug-2009 12:03:57.567 WARNING [main] org.apache.tomcat.util.http.Parameters.processParameters Parameters: " + + "Invalid chunk ignored." ); - assertEquals(1, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + + { + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ); + + // Non ECS compatible Grok patterns will fail to match the new CATALINA_DATESTAMP format + IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> grokPatternCreator.createGrokPatternFromExamples( + "CATALINA_DATESTAMP", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + + assertEquals( + "[%{CATALINA_DATESTAMP}] does not match snippet [29-Aug-2009 12:03:33.123 WARNING [main]" + + " org.apache.tomcat.util.http.Parameters.processParameters Parameters: Invalid chunk ignored.]", + e.getMessage() + ); + } + { + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ); + + // ECS compatible Grok patterns match the new, generic CATALINA_DATESTAMP format and provide the new, ECS name for the LOGLEVEL + // capture. 
+ assertEquals( + "%{CATALINA_DATESTAMP:timestamp} %{LOGLEVEL:log.level} \\[.*", + grokPatternCreator.createGrokPatternFromExamples( + "CATALINA_DATESTAMP", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(1, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("log.level")); + } + { + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ); + + // ECS compatible Grok patterns also understand the new CATALINA8_DATESTAMP Grok pattern name and provide the new, ECS name for + // the LOGLEVEL capture. + assertEquals( + "%{CATALINA8_DATESTAMP:timestamp} %{LOGLEVEL:log.level} \\[.*", + grokPatternCreator.createGrokPatternFromExamples( + "CATALINA8_DATESTAMP", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(1, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("log.level")); + } } public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogs() { @@ -323,33 +596,56 @@ public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogs() { ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - assertEquals( - "%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" - + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", - grokPatternCreator.createGrokPatternFromExamples( - "TIMESTAMP_ISO8601", - TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, - "timestamp" - ) - ); - assertEquals(5, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - Map expectedDateMapping = new HashMap<>(); - expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); - expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); - assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + String[] expectedLoglevel = new String[] { "loglevel", "log.level" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + String loglevel = expectedLoglevel[index]; + assertEquals( + "%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:" + + loglevel + + "}\\t.*", + grokPatternCreator.createGrokPatternFromExamples( + "TIMESTAMP_ISO8601", + 
TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get(loglevel)); + + mappings.clear(); + ++index; + } } public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndIndeterminateFormat() { @@ -367,33 +663,56 @@ public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndIndetermi ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - assertEquals( - "%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{DATESTAMP:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" - + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", - grokPatternCreator.createGrokPatternFromExamples( - "TIMESTAMP_ISO8601", - TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, - "timestamp" - ) - ); - assertEquals(5, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - Map expectedDateMapping = new HashMap<>(); - expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date_nanos"); - expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "dd/MM/yyyy HH:mm:ss,SSSSSS"); - assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + String[] expectedLoglevel = new String[] { "loglevel", "log.level" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + String loglevel = expectedLoglevel[index]; + assertEquals( + "%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{DATESTAMP:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:" + + loglevel + + "}\\t.*", + grokPatternCreator.createGrokPatternFromExamples( + "TIMESTAMP_ISO8601", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + Map expectedDateMapping = new HashMap<>(); + 
expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date_nanos"); + expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "dd/MM/yyyy HH:mm:ss,SSSSSS"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get(loglevel)); + + mappings.clear(); + ++index; + } } public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndCustomDefinition() { @@ -411,32 +730,55 @@ public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndCustomDef ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), - NOOP_TIMEOUT_CHECKER - ); - Map customMapping = new HashMap<>(); - customMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); - customMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "M/dd/yyyy h:mma"); - assertEquals( - "%{INT:field}\\t%{CUSTOM_TIMESTAMP:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" - + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", - grokPatternCreator.createGrokPatternFromExamples("CUSTOM_TIMESTAMP", customMapping, "timestamp") - ); - assertEquals(5, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - Map expectedDateMapping = new HashMap<>(); - expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); - expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); - assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + String[] expectedLoglevel = new String[] { "loglevel", "log.level" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + String loglevel = expectedLoglevel[index]; + Map customMapping = new HashMap<>(); + customMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); + customMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "M/dd/yyyy h:mma"); + assertEquals( + "%{INT:field}\\t%{CUSTOM_TIMESTAMP:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:" + + loglevel + + "}\\t.*", + 
grokPatternCreator.createGrokPatternFromExamples("CUSTOM_TIMESTAMP", customMapping, "timestamp") + ); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get(loglevel)); + + mappings.clear(); + ++index; + } } public void testCreateGrokPatternFromExamplesGivenTimestampAndTimeWithoutDate() { @@ -454,30 +796,53 @@ public void testCreateGrokPatternFromExamplesGivenTimestampAndTimeWithoutDate() ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - assertEquals( - "%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIME:time}\\t%{INT:field2}\\t.*?\\t" - + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", - grokPatternCreator.createGrokPatternFromExamples( - "TIMESTAMP_ISO8601", - TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, - "timestamp" - ) - ); - assertEquals(5, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("time")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + String[] expectedLoglevel = new String[] { "loglevel", "log.level" }; + + int index = 0; + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + String loglevel = expectedLoglevel[index]; + assertEquals( + "%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIME:time}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:" + + loglevel + + "}\\t.*", + grokPatternCreator.createGrokPatternFromExamples( + "TIMESTAMP_ISO8601", + TextStructureUtils.DATE_MAPPING_WITHOUT_FORMAT, + "timestamp" + ) + ); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("time")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + 
assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get(loglevel)); + + mappings.clear(); + ++index; + } } public void testFindFullLineGrokPatternGivenApacheCombinedLogs() { @@ -501,30 +866,73 @@ public void testFindFullLineGrokPatternGivenApacheCombinedLogs() { ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - assertEquals( - new Tuple<>("timestamp", "%{COMBINEDAPACHELOG}"), - grokPatternCreator.findFullLineGrokPattern(randomBoolean() ? "timestamp" : null) - ); - assertEquals(10, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "text"), mappings.get("agent")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("auth")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bytes")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("clientip")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "double"), mappings.get("httpversion")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("ident")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("referrer")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("request")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("response")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("verb")); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + { + assertEquals( + new Tuple<>("timestamp", "%{COMBINEDAPACHELOG}"), + grokPatternCreators[0].findFullLineGrokPattern(randomBoolean() ? 
"timestamp" : null) + ); + assertEquals(10, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "text"), mappings.get("agent")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("auth")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bytes")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("clientip")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "double"), mappings.get("httpversion")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("ident")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("referrer")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("request")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("response")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("verb")); + + mappings.clear(); + } + { + // ECS Grok patterns for httpd logs have markedly different capture names from the legacy ones + assertEquals( + new Tuple<>("timestamp", "%{COMBINEDAPACHELOG}"), + grokPatternCreators[1].findFullLineGrokPattern(randomBoolean() ? "timestamp" : null) + ); + assertEquals(8, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "text"), mappings.get("user_agent.original")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("url.original")); + assertEquals( + Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), + mappings.get("http.response.body.bytes") + ); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("source.address")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "double"), mappings.get("http.version")); + assertEquals( + Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), + mappings.get("http.request.referrer") + ); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("http.request.method")); + assertEquals( + Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), + mappings.get("http.response.status_code") + ); + + mappings.clear(); + } } public void testAdjustForPunctuationGivenCommonPrefix() { @@ -540,22 +948,28 @@ public void testAdjustForPunctuationGivenCommonPrefix() { "0000000000000000"\ """.lines().toList(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); - - assertEquals("\",", grokPatternCreator.getOverallGrokPatternBuilder().toString()); - assertNotNull(adjustedSnippets); - assertThat( - new ArrayList<>(adjustedSnippets), - containsInAnyOrder(snippets.stream().map(snippet -> snippet.substring(2)).toArray(String[]::new)) - ); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + 
ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); + + assertEquals("\",", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + assertNotNull(adjustedSnippets); + assertThat( + new ArrayList<>(adjustedSnippets), + containsInAnyOrder(snippets.stream().map(snippet -> snippet.substring(2)).toArray(String[]::new)) + ); + } } public void testAdjustForPunctuationGivenNoCommonPrefix() { @@ -566,18 +980,24 @@ public void testAdjustForPunctuationGivenNoCommonPrefix() { + "was added by 'User1'(id:2) to servergroup 'GAME'(id:9)" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - snippets, - null, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); - - assertEquals("", grokPatternCreator.getOverallGrokPatternBuilder().toString()); - assertSame(snippets, adjustedSnippets); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + snippets, + null, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER, ECS_COMPATIBILITY_V1) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); + + assertEquals("", grokPatternCreator.getOverallGrokPatternBuilder().toString()); + assertSame(snippets, adjustedSnippets); + } } public void testValidateFullLineGrokPatternGivenValid() { @@ -600,29 +1020,45 @@ public void testValidateFullLineGrokPatternGivenValid() { ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); - assertEquals(9, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); - Map expectedDateMapping = new HashMap<>(); - expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); - expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); - assertEquals(expectedDateMapping, mappings.get("local_timestamp")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("message")); + GrokPatternCreator[] grokPatternCreators = new 
GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); + assertEquals(9, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("local_timestamp")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("message")); + + mappings.clear(); + } } public void testValidateFullLineGrokPatternGivenValidAndCustomDefinition() { @@ -645,29 +1081,45 @@ public void testValidateFullLineGrokPatternGivenValidAndCustomDefinition() { ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), - NOOP_TIMEOUT_CHECKER - ); - grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); - assertEquals(9, mappings.size()); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); - Map expectedDateMapping = new HashMap<>(); - expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); - expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); - assertEquals(expectedDateMapping, mappings.get("utc_timestamp")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), 
mappings.get("message")); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); + assertEquals(9, mappings.size()); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("utc_timestamp")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("message")); + + mappings.clear(); + } } public void testValidateFullLineGrokPatternGivenInvalid() { @@ -685,21 +1137,38 @@ public void testValidateFullLineGrokPatternGivenInvalid() { ); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator( - explanation, - sampleMessages, - mappings, - null, - Collections.emptyMap(), - NOOP_TIMEOUT_CHECKER - ); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField) - ); - - assertEquals("Supplied Grok pattern [" + grokPattern + "] does not match sample messages", e.getMessage()); + GrokPatternCreator[] grokPatternCreators = new GrokPatternCreator[] { + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_DISABLED + ), + new GrokPatternCreator( + explanation, + sampleMessages, + mappings, + null, + Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER, + ECS_COMPATIBILITY_V1 + ) }; + + for (GrokPatternCreator grokPatternCreator : grokPatternCreators) { + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField) + ); + + assertEquals("Supplied Grok pattern [" + grokPattern + "] does not match sample messages", e.getMessage()); + + mappings.clear(); + } } public void testLongestRun() { diff --git 
a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtilsTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtilsTests.java index 7a99b35e71197..0f4a8044657a0 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtilsTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TextStructureUtilsTests.java @@ -10,18 +10,24 @@ import org.elasticsearch.xpack.core.textstructure.structurefinder.FieldStats; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.SortedMap; +import java.util.function.Consumer; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class TextStructureUtilsTests extends TextStructureTestCase { + private static final boolean ECS_COMPATIBILITY_DISABLED = false; + private static final boolean ECS_COMPATIBILITY_ENABLED = true; + + private static final Collection ecsCompatibilityModes = Arrays.asList(ECS_COMPATIBILITY_ENABLED, ECS_COMPATIBILITY_DISABLED); public void testMoreLikelyGivenText() { assertTrue(TextStructureUtils.isMoreLikelyTextThanKeyword("the quick brown fox jumped over the lazy dog")); @@ -290,43 +296,68 @@ public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentAndConsisten } public void testGuessMappingGivenNothing() { - assertNull(guessMapping(explanation, "foo", Collections.emptyList())); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> assertNull( + guessMapping(explanation, "foo", Collections.emptyList(), ecsCompatibility) + ); + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenKeyword() { Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("ERROR", "INFO", "DEBUG"))); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "not a date"))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("ERROR", "INFO", "DEBUG"), ecsCompatibility)); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "not a date"), ecsCompatibility)); + }; + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenText() { Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "text"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("a", "the quick brown fox jumped over the lazy dog"))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> assertEquals( + expected, + guessMapping(explanation, "foo", Arrays.asList("a", "the quick brown fox jumped over the lazy dog"), ecsCompatibility) + ); + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenIp() { Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "ip"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("10.0.0.1", "172.16.0.1", 
"192.168.0.1"))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> assertEquals( + expected, + guessMapping(explanation, "foo", Arrays.asList("10.0.0.1", "172.16.0.1", "192.168.0.1"), ecsCompatibility) + ); + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenDouble() { Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "double"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("3.14159265359", "0", "-8"))); - // 12345678901234567890 is too long for long - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("1", "2", "12345678901234567890"))); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(3.14159265359, 0.0, 1e-308))); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("-1e-1", "-1e308", "1e-308"))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("3.14159265359", "0", "-8"), ecsCompatibility)); + // 12345678901234567890 is too long for long + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("1", "2", "12345678901234567890"), ecsCompatibility)); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(3.14159265359, 0.0, 1e-308), ecsCompatibility)); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("-1e-1", "-1e308", "1e-308"), ecsCompatibility)); + }; + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenLong() { Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("500", "3", "-3"))); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(500, 6, 0))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("500", "3", "-3"), ecsCompatibility)); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(500, 6, 0), ecsCompatibility)); + }; + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenDate() { @@ -334,46 +365,75 @@ public void testGuessMappingGivenDate() { expected.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); expected.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "2018-06-11T13:27:12Z"))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> assertEquals( + expected, + guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "2018-06-11T13:27:12Z"), ecsCompatibility) + ); + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenBoolean() { Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "boolean"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("false", "true"))); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(true, false))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("false", "true"), ecsCompatibility)); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(true, false), ecsCompatibility)); + }; 
+ + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenArray() { - Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(42, Arrays.asList(1, -99)))); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { - expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"); - assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(new String[] { "x", "y" }, "z"))); + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(42, Arrays.asList(1, -99)), ecsCompatibility)); + + expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + + assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList(new String[] { "x", "y" }, "z"), ecsCompatibility)); + }; + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenObject() { - Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "object"); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + Map expected = Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "object"); - assertEquals( - expected, - guessMapping( - explanation, - "foo", - Arrays.asList(Collections.singletonMap("name", "value1"), Collections.singletonMap("name", "value2")) - ) - ); + assertEquals( + expected, + guessMapping( + explanation, + "foo", + Arrays.asList(Collections.singletonMap("name", "value1"), Collections.singletonMap("name", "value2")), + ecsCompatibility + ) + ); + }; + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingGivenObjectAndNonObject() { - RuntimeException e = expectThrows( - RuntimeException.class, - () -> guessMapping(explanation, "foo", Arrays.asList(Collections.singletonMap("name", "value1"), "value2")) - ); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + RuntimeException e = expectThrows( + RuntimeException.class, + () -> guessMapping( + explanation, + "foo", + Arrays.asList(Collections.singletonMap("name", "value1"), "value2"), + ecsCompatibility + ) + ); + + assertEquals("Field [foo] has both object and non-object values - this is not supported by Elasticsearch", e.getMessage()); + }; - assertEquals("Field [foo] has both object and non-object values - this is not supported by Elasticsearch", e.getMessage()); + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testGuessMappingsAndCalculateFieldStats() { @@ -388,36 +448,40 @@ public void testGuessMappingsAndCalculateFieldStats() { sample2.put("bar", 17); sample2.put("nothing", null); - Tuple, SortedMap> mappingsAndFieldStats = TextStructureUtils - .guessMappingsAndCalculateFieldStats(explanation, Arrays.asList(sample1, sample2), NOOP_TIMEOUT_CHECKER); - assertNotNull(mappingsAndFieldStats); - - Map mappings = mappingsAndFieldStats.v1(); - assertNotNull(mappings); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("foo")); - Map expectedTimeMapping = new HashMap<>(); - expectedTimeMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); - expectedTimeMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "yyyy-MM-dd HH:mm:ss,SSS"); - 
assertEquals(expectedTimeMapping, mappings.get("time")); - assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bar")); - assertNull(mappings.get("nothing")); - - Map fieldStats = mappingsAndFieldStats.v2(); - assertNotNull(fieldStats); - assertEquals(3, fieldStats.size()); - assertEquals(new FieldStats(2, 2, makeTopHits("not a time", 1, "whatever", 1)), fieldStats.get("foo")); - assertEquals( - new FieldStats( - 2, - 2, - "2018-05-24 17:28:31,735", - "2018-05-29 11:53:02,837", - makeTopHits("2018-05-24 17:28:31,735", 1, "2018-05-29 11:53:02,837", 1) - ), - fieldStats.get("time") - ); - assertEquals(new FieldStats(2, 2, 17.0, 42.0, 29.5, 29.5, makeTopHits(17, 1, 42, 1)), fieldStats.get("bar")); - assertNull(fieldStats.get("nothing")); + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + Tuple, SortedMap> mappingsAndFieldStats = TextStructureUtils + .guessMappingsAndCalculateFieldStats(explanation, Arrays.asList(sample1, sample2), NOOP_TIMEOUT_CHECKER, ecsCompatibility); + assertNotNull(mappingsAndFieldStats); + + Map mappings = mappingsAndFieldStats.v1(); + assertNotNull(mappings); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("foo")); + Map expectedTimeMapping = new HashMap<>(); + expectedTimeMapping.put(TextStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedTimeMapping.put(TextStructureUtils.MAPPING_FORMAT_SETTING, "yyyy-MM-dd HH:mm:ss,SSS"); + assertEquals(expectedTimeMapping, mappings.get("time")); + assertEquals(Collections.singletonMap(TextStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bar")); + assertNull(mappings.get("nothing")); + + Map fieldStats = mappingsAndFieldStats.v2(); + assertNotNull(fieldStats); + assertEquals(3, fieldStats.size()); + assertEquals(new FieldStats(2, 2, makeTopHits("not a time", 1, "whatever", 1)), fieldStats.get("foo")); + assertEquals( + new FieldStats( + 2, + 2, + "2018-05-24 17:28:31,735", + "2018-05-29 11:53:02,837", + makeTopHits("2018-05-24 17:28:31,735", 1, "2018-05-29 11:53:02,837", 1) + ), + fieldStats.get("time") + ); + assertEquals(new FieldStats(2, 2, 17.0, 42.0, 29.5, 29.5, makeTopHits(17, 1, 42, 1)), fieldStats.get("bar")); + assertNull(fieldStats.get("nothing")); + }; + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } public void testMakeIngestPipelineDefinitionGivenNdJsonWithoutTimestamp() { @@ -431,7 +495,8 @@ public void testMakeIngestPipelineDefinitionGivenNdJsonWithoutTimestamp() { null, null, false, - false + false, + null ) ); } @@ -446,6 +511,7 @@ public void testMakeIngestPipelineDefinitionGivenNdJsonWithTimestamp() { ); boolean needClientTimezone = randomBoolean(); boolean needNanosecondPrecision = randomBoolean(); + String ecsCompatibility = randomAlphaOfLength(80); Map pipeline = TextStructureUtils.makeIngestPipelineDefinition( null, @@ -455,7 +521,8 @@ public void testMakeIngestPipelineDefinitionGivenNdJsonWithTimestamp() { timestampField, timestampFormats, needClientTimezone, - needNanosecondPrecision + needNanosecondPrecision, + ecsCompatibility ); assertNotNull(pipeline); @@ -493,7 +560,8 @@ public void testMakeIngestPipelineDefinitionGivenDelimitedWithoutTimestamp() { null, null, false, - false + false, + null ); assertNotNull(pipeline); @@ -532,7 +600,8 @@ public void testMakeIngestPipelineDefinitionGivenDelimitedWithFieldInTargetField null, null, false, - false + false, + null ); assertNotNull(pipeline); @@ -572,7 +641,8 @@ public void 
testMakeIngestPipelineDefinitionGivenDelimitedWithConversion() { null, null, false, - false + false, + null ); assertNotNull(pipeline); @@ -617,6 +687,8 @@ public void testMakeIngestPipelineDefinitionGivenDelimitedWithTimestamp() { boolean needClientTimezone = randomBoolean(); boolean needNanosecondPrecision = randomBoolean(); + String ecsCompatibility = randomAlphaOfLength(80); + Map pipeline = TextStructureUtils.makeIngestPipelineDefinition( null, Collections.emptyMap(), @@ -625,7 +697,8 @@ public void testMakeIngestPipelineDefinitionGivenDelimitedWithTimestamp() { timestampField, timestampFormats, needClientTimezone, - needNanosecondPrecision + needNanosecondPrecision, + ecsCompatibility ); assertNotNull(pipeline); @@ -672,6 +745,8 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { boolean needClientTimezone = randomBoolean(); boolean needNanosecondPrecision = randomBoolean(); + String ecsCompatibility = randomAlphaOfLength(80); + Map pipeline = TextStructureUtils.makeIngestPipelineDefinition( grokPattern, Collections.emptyMap(), @@ -680,7 +755,8 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { timestampField, timestampFormats, needClientTimezone, - needNanosecondPrecision + needNanosecondPrecision, + ecsCompatibility ); assertNotNull(pipeline); @@ -715,60 +791,78 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { } public void testGuessGeoPoint() { - Map mapping = TextStructureUtils.guessScalarMapping( - explanation, - "foo", - Arrays.asList("POINT (-77.03653 38.897676)", "POINT (-50.03653 28.8973)"), - NOOP_TIMEOUT_CHECKER - ); - assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("geo_point")); - - mapping = TextStructureUtils.guessScalarMapping( - explanation, - "foo", - Arrays.asList("POINT (-77.03653 38.897676)", "bar"), - NOOP_TIMEOUT_CHECKER - ); - assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("keyword")); - } + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + Map mapping = TextStructureUtils.guessScalarMapping( + explanation, + "foo", + Arrays.asList("POINT (-77.03653 38.897676)", "POINT (-50.03653 28.8973)"), + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("geo_point")); - public void testGuessGeoShape() { - Map mapping = TextStructureUtils.guessScalarMapping( - explanation, - "foo", - Arrays.asList( - "POINT (-77.03653 38.897676)", - "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)", - "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))", - "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), " - + "(100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2))", - "MULTIPOINT (102.0 2.0, 103.0 2.0)", - "MULTILINESTRING ((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0), (100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0)," - + " (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8))", - "MULTIPOLYGON (((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0, 102.0 2.0)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, " - + "100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))", - "GEOMETRYCOLLECTION (POINT (100.0 0.0), LINESTRING (101.0 0.0, 102.0 1.0))", - "BBOX (100.0, 102.0, 2.0, 0.0)" - ), - NOOP_TIMEOUT_CHECKER - ); - assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("geo_shape")); + mapping = TextStructureUtils.guessScalarMapping( + explanation, + "foo", + Arrays.asList("POINT (-77.03653 38.897676)", "bar"), + 
NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("keyword")); + }; - mapping = TextStructureUtils.guessScalarMapping( - explanation, - "foo", - Arrays.asList("POINT (-77.03653 38.897676)", "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)", "bar"), - NOOP_TIMEOUT_CHECKER - ); - assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("keyword")); + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); } - private Map guessMapping(List explanation, String fieldName, List fieldValues) { + public void testGuessGeoShape() { + Consumer testGuessMappingGivenEcsCompatibility = (ecsCompatibility) -> { + Map mapping = TextStructureUtils.guessScalarMapping( + explanation, + "foo", + Arrays.asList( + "POINT (-77.03653 38.897676)", + "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)", + "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))", + "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), " + + "(100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2))", + "MULTIPOINT (102.0 2.0, 103.0 2.0)", + "MULTILINESTRING ((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0), (100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0)," + + " (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8))", + "MULTIPOLYGON (((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0, 102.0 2.0)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, " + + "100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))", + "GEOMETRYCOLLECTION (POINT (100.0 0.0), LINESTRING (101.0 0.0, 102.0 1.0))", + "BBOX (100.0, 102.0, 2.0, 0.0)" + ), + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("geo_shape")); + + mapping = TextStructureUtils.guessScalarMapping( + explanation, + "foo", + Arrays.asList("POINT (-77.03653 38.897676)", "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)", "bar"), + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + assertThat(mapping.get(TextStructureUtils.MAPPING_TYPE_SETTING), equalTo("keyword")); + }; + + ecsCompatibilityModes.forEach(testGuessMappingGivenEcsCompatibility); + } + + private Map guessMapping( + List explanation, + String fieldName, + List fieldValues, + boolean ecsCompatibility + ) { Tuple, FieldStats> mappingAndFieldStats = TextStructureUtils.guessMappingAndCalculateFieldStats( explanation, fieldName, fieldValues, - NOOP_TIMEOUT_CHECKER + NOOP_TIMEOUT_CHECKER, + ecsCompatibility ); return (mappingAndFieldStats == null) ? 
null : mappingAndFieldStats.v1(); } diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java index c55f4a53fa468..31929e38cdbdb 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java @@ -18,13 +18,20 @@ import java.time.temporal.TemporalQueries; import java.util.Arrays; import java.util.BitSet; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Consumer; import java.util.regex.Pattern; public class TimestampFormatFinderTests extends TextStructureTestCase { + private static final boolean ECS_COMPATIBILITY_DISABLED = false; + private static final boolean ECS_COMPATIBILITY_ENABLED = true; + + private static final Collection ecsCompatibilityModes = Arrays.asList(ECS_COMPATIBILITY_ENABLED, ECS_COMPATIBILITY_DISABLED); + @SuppressWarnings("checkstyle:linelength") private static final String EXCEPTION_TRACE_SAMPLE = """ @@ -136,59 +143,72 @@ public void testInvalidOverrideFormatToGrokAndRegex() { public void testMakeCandidateFromOverrideFormat() { - // Override is a special format - assertSame( - TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, - TimestampFormatFinder.makeCandidateFromOverrideFormat("ISO8601", NOOP_TIMEOUT_CHECKER) - ); - assertSame( - TimestampFormatFinder.UNIX_MS_CANDIDATE_FORMAT, - TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX_MS", NOOP_TIMEOUT_CHECKER) - ); - assertSame( - TimestampFormatFinder.UNIX_CANDIDATE_FORMAT, - TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX", NOOP_TIMEOUT_CHECKER) - ); - assertSame( - TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, - TimestampFormatFinder.makeCandidateFromOverrideFormat("TAI64N", NOOP_TIMEOUT_CHECKER) - ); - - // Override is covered by a built-in format - TimestampFormatFinder.CandidateTimestampFormat candidate = TimestampFormatFinder.makeCandidateFromOverrideFormat( - "yyyy-MM-dd'T'HH:mm:ss.SSS", - NOOP_TIMEOUT_CHECKER - ); - assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.outputGrokPatternName, candidate.outputGrokPatternName); - assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.strictGrokPattern, candidate.strictGrokPattern); - // Can't compare Grok objects as Grok doesn't implement equals() - assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.simplePattern.pattern(), candidate.simplePattern.pattern()); - // Exact format supplied is returned if it matches - assertEquals( - Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss.SSS"), - candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56.374") - ); - // Other supported formats are returned if exact format doesn't match - assertEquals(Collections.singletonList("ISO8601"), candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56,374")); - - // Override is supported but not covered by any built-in format - candidate = TimestampFormatFinder.makeCandidateFromOverrideFormat("MM/dd/yyyy H:mm:ss zzz", NOOP_TIMEOUT_CHECKER); - assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); - assertEquals("%{MONTHNUM2}/%{MONTHDAY}/%{YEAR} 
%{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); - assertEquals("\\b\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); - assertEquals( - Collections.singletonList("MM/dd/yyyy H:mm:ss zzz"), - candidate.javaTimestampFormatSupplier.apply("05/15/2018 16:14:56 UTC") - ); - - candidate = TimestampFormatFinder.makeCandidateFromOverrideFormat("M/d/yyyy H:mm:ss zzz", NOOP_TIMEOUT_CHECKER); - assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); - assertEquals("%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); - assertEquals("\\b\\d{1,2}/\\d{1,2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); - assertEquals( - Collections.singletonList("M/d/yyyy H:mm:ss zzz"), - candidate.javaTimestampFormatSupplier.apply("5/15/2018 16:14:56 UTC") - ); + Consumer testMakeCandidateFromOverrideFormatGivenEcsCompatibility = (ecsCompatibility) -> { + // Override is a special format + assertSame( + TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("ISO8601", NOOP_TIMEOUT_CHECKER, ecsCompatibility) + ); + assertSame( + TimestampFormatFinder.UNIX_MS_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX_MS", NOOP_TIMEOUT_CHECKER, ecsCompatibility) + ); + assertSame( + TimestampFormatFinder.UNIX_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX", NOOP_TIMEOUT_CHECKER, ecsCompatibility) + ); + assertSame( + TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("TAI64N", NOOP_TIMEOUT_CHECKER, ecsCompatibility) + ); + + // Override is covered by a built-in format + TimestampFormatFinder.CandidateTimestampFormat candidate = TimestampFormatFinder.makeCandidateFromOverrideFormat( + "yyyy-MM-dd'T'HH:mm:ss.SSS", + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.outputGrokPatternName, candidate.outputGrokPatternName); + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.strictGrokPattern, candidate.strictGrokPattern); + // Can't compare Grok objects as Grok doesn't implement equals() + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.simplePattern.pattern(), candidate.simplePattern.pattern()); + // Exact format supplied is returned if it matches + assertEquals( + Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss.SSS"), + candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56.374") + ); + // Other supported formats are returned if exact format doesn't match + assertEquals(Collections.singletonList("ISO8601"), candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56,374")); + + // Override is supported but not covered by any built-in format + candidate = TimestampFormatFinder.makeCandidateFromOverrideFormat( + "MM/dd/yyyy H:mm:ss zzz", + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); + assertEquals("%{MONTHNUM2}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); + assertEquals("\\b\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); + assertEquals( + Collections.singletonList("MM/dd/yyyy H:mm:ss zzz"), + candidate.javaTimestampFormatSupplier.apply("05/15/2018 16:14:56 UTC") + ); + + candidate = 
TimestampFormatFinder.makeCandidateFromOverrideFormat( + "M/d/yyyy H:mm:ss zzz", + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); + assertEquals("%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); + assertEquals("\\b\\d{1,2}/\\d{1,2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); + assertEquals( + Collections.singletonList("M/d/yyyy H:mm:ss zzz"), + candidate.javaTimestampFormatSupplier.apply("5/15/2018 16:14:56 UTC") + ); + }; + + ecsCompatibilityModes.forEach(testMakeCandidateFromOverrideFormatGivenEcsCompatibility); } public void testRequiresTimezoneDependentParsing() { @@ -1206,36 +1226,42 @@ public void testCustomOverrideMatchingBuiltInFormat() { String expectedSimpleRegex = "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}"; String expectedGrokPatternName = "TIMESTAMP_ISO8601"; - TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder( - explanation, - overrideFormat, - true, - true, - true, - NOOP_TIMEOUT_CHECKER - ); - strictTimestampFormatFinder.addSample(text); - assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); - assertEquals(Collections.emptyMap(), strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); - assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); - assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); - assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); - - TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder( - explanation, - overrideFormat, - false, - false, - false, - NOOP_TIMEOUT_CHECKER - ); - lenientTimestampFormatFinder.addSample(text); - lenientTimestampFormatFinder.selectBestMatch(); - assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); - assertEquals(Collections.emptyMap(), lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); - assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); - assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); - assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + Consumer testCustomOverrideMatchingBuiltInFormatGivenEcsCompatibility = (ecsCompatibility) -> { + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder( + explanation, + overrideFormat, + true, + true, + true, + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(Collections.emptyMap(), strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder( + explanation, + overrideFormat, + false, + false, + false, + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, 
lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(Collections.emptyMap(), lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + }; + + ecsCompatibilityModes.forEach(testCustomOverrideMatchingBuiltInFormatGivenEcsCompatibility); } public void testCustomOverridesNotMatchingBuiltInFormat() { @@ -1295,36 +1321,42 @@ private void validateCustomOverrideNotMatchingBuiltInFormat( String expectedGrokPatternName, Map expectedCustomGrokPatternDefinitions ) { - TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder( - explanation, - overrideFormat, - true, - true, - true, - NOOP_TIMEOUT_CHECKER - ); - strictTimestampFormatFinder.addSample(text); - assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); - assertEquals(expectedCustomGrokPatternDefinitions, strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); - assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); - assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); - assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); - - TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder( - explanation, - overrideFormat, - false, - false, - false, - NOOP_TIMEOUT_CHECKER - ); - lenientTimestampFormatFinder.addSample(text); - lenientTimestampFormatFinder.selectBestMatch(); - assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); - assertEquals(expectedCustomGrokPatternDefinitions, lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); - assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); - assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); - assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + Consumer validateCustomOverrideNotMatchingBuiltInFormatGivenEcsCompatibility = (ecsCompatibility) -> { + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder( + explanation, + overrideFormat, + true, + true, + true, + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedCustomGrokPatternDefinitions, strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder( + explanation, + overrideFormat, + false, + false, + false, + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedCustomGrokPatternDefinitions, lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); 
+ assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + }; + + ecsCompatibilityModes.forEach(validateCustomOverrideNotMatchingBuiltInFormatGivenEcsCompatibility); } public void testFindFormatGivenRealLogMessages() { @@ -1350,7 +1382,7 @@ public void testFindFormatGivenRealLogMessages() { validateFindInFullMessage( "Aug 29, 2009 12:03:57 AM org.apache.tomcat.util.http.Parameters processParameters", "", - "CATALINA_DATESTAMP", + "CATALINA7_DATESTAMP", "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "MMM dd, yyyy h:mm:ss a" ); @@ -1410,21 +1442,27 @@ public void testFindFormatGivenRealLogMessages() { "yyyy-MM-dd HH:mm:ss.SSSSSS" ); - // Non-matching required format specified - TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder( - explanation, - randomFrom("UNIX", "EEE MMM dd yyyy HH:mm zzz"), - false, - false, - false, - NOOP_TIMEOUT_CHECKER - ); - timestampFormatFinder.addSample( - "2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " - + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)" - ); - assertEquals(Collections.emptyList(), timestampFormatFinder.getJavaTimestampFormats()); - assertEquals(0, timestampFormatFinder.getNumMatchedFormats()); + Consumer testFindFormatWithNonMatchingRequiredFormatGivenEcsCompatibility = (ecsCompatibility) -> { + // Non-matching required format specified + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder( + explanation, + randomFrom("UNIX", "EEE MMM dd yyyy HH:mm zzz"), + false, + false, + false, + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + timestampFormatFinder.addSample( + "2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5)" + + " by client 'User1'(id:2) in channel '3er Instanz'(id:2)" + ); + assertEquals(Collections.emptyList(), timestampFormatFinder.getJavaTimestampFormats()); + assertEquals(0, timestampFormatFinder.getNumMatchedFormats()); + }; + + ecsCompatibilityModes.forEach(testFindFormatWithNonMatchingRequiredFormatGivenEcsCompatibility); } public void testSelectBestMatchGivenAllSame() { @@ -1666,24 +1704,35 @@ private void validateFindInFullMessage( List expectedJavaTimestampFormats ) { - Pattern expectedSimplePattern = Pattern.compile(expectedSimpleRegex); - assertTrue(expectedSimplePattern.matcher(message).find()); + Consumer validateFindInFullMessageGivenEcsCompatibility = (ecsCompatibility) -> { + Pattern expectedSimplePattern = Pattern.compile(expectedSimpleRegex); + assertTrue(expectedSimplePattern.matcher(message).find()); + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder( + explanation, + timestampFormatOverride, + false, + false, + false, + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); + timestampFormatFinder.addSample(message); + timestampFormatFinder.selectBestMatch(); + if ("CATALINA7_DATESTAMP".equals(expectedGrokPatternName)) { + if (ecsCompatibility) { + assertEquals(expectedGrokPatternName, timestampFormatFinder.getGrokPatternName()); + } else { + assertEquals("CATALINA_DATESTAMP", timestampFormatFinder.getGrokPatternName()); + } + } + assertEquals(expectedSimplePattern.pattern(), timestampFormatFinder.getSimplePattern().pattern()); + 
assertEquals(expectedJavaTimestampFormats, timestampFormatFinder.getJavaTimestampFormats()); + assertEquals(Collections.singletonList(expectedPreface), timestampFormatFinder.getPrefaces()); + assertEquals(1, timestampFormatFinder.getNumMatchedFormats()); + }; - TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder( - explanation, - timestampFormatOverride, - false, - false, - false, - NOOP_TIMEOUT_CHECKER - ); - timestampFormatFinder.addSample(message); - timestampFormatFinder.selectBestMatch(); - assertEquals(expectedGrokPatternName, timestampFormatFinder.getGrokPatternName()); - assertEquals(expectedSimplePattern.pattern(), timestampFormatFinder.getSimplePattern().pattern()); - assertEquals(expectedJavaTimestampFormats, timestampFormatFinder.getJavaTimestampFormats()); - assertEquals(Collections.singletonList(expectedPreface), timestampFormatFinder.getPrefaces()); - assertEquals(1, timestampFormatFinder.getNumMatchedFormats()); + ecsCompatibilityModes.forEach(validateFindInFullMessageGivenEcsCompatibility); } private void validateJavaTimestampFormats(List javaTimestampFormats, String text, long expectedEpochMs) { From b81f4187abe788f405be74cb95126066404f8fb4 Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> Date: Thu, 4 Aug 2022 20:42:34 +0300 Subject: [PATCH 111/265] [TSDB] Metric fields in the field caps API (#88695) To assist the user in configuring the visualizations correctly while leveraging TSDB functionality, information about TSDB configuration should be exposed via the field caps API per field. Especially for metric fields, it must be clear which fields are metrics and if they belong to only time-series indexes or mixed time-series and non-time-series indexes. To further distinguish metric fields when they belong to any of the following indices: - Standard (non-time-series) indexes - Time series indexes - Downsampled time series indexes This PR modifies the field caps API so that the mapping parameters time_series_dimension and time_series_metric are presented only when they are set on fields of time-series indexes. Those parameters are completely ignored when they are set on standard (non-time-series) indexes.
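As a rough illustration of the intended behaviour (this snippet is not part of the
patch; the index and field names are borrowed from the YAML REST tests added below),
the same field caps request is expected to report the metric type only for the
time series index:

    - do:
        field_caps:
          index: test_time_series
          fields: [ k8s.pod.network.rx ]
    # gauge metric mapped in a time series index: the metric type is reported
    - match: { fields.k8s\.pod\.network\.rx.long.time_series_metric: gauge }

    - do:
        field_caps:
          index: test_non_time_series
          fields: [ k8s.pod.network.rx ]
    # same mapping on a standard index: the parameter is ignored and omitted
    - is_false: fields.k8s\.pod\.network\.rx.long.time_series_metric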
This PR revisits some of the conventions adopted by #78790 --- docs/changelog/88695.yaml | 5 + docs/reference/search/field-caps.asciidoc | 10 +- .../test/field_caps/40_time_series.yml | 10 + ...dimension_and_metric_in_non_tsdb_index.yml | 6 +- .../test/tsdb/110_field_caps.yml | 231 ++++++++++++++++++ .../rest-api-spec/test/tsdb/40_search.yml | 39 --- .../search/fieldcaps/FieldCapabilitiesIT.java | 11 +- .../support/TimeSeriesMetricsIT.java | 70 +++--- .../fieldcaps/FieldCapabilitiesFetcher.java | 11 +- .../fieldcaps/IndexFieldCapabilities.java | 1 - .../test/aggregate-metrics/110_field_caps.yml | 159 ++++++++++++ 11 files changed, 474 insertions(+), 79 deletions(-) create mode 100644 docs/changelog/88695.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/110_field_caps.yml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml diff --git a/docs/changelog/88695.yaml b/docs/changelog/88695.yaml new file mode 100644 index 0000000000000..cd8f9b6cb866a --- /dev/null +++ b/docs/changelog/88695.yaml @@ -0,0 +1,5 @@ +pr: 88695 +summary: "[TSDB] Metric fields in the field caps API" +area: TSDB +type: enhancement +issues: [] diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index fda14aed559fa..e51bf01e2f651 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -137,11 +137,13 @@ field types are all described as the `keyword` type family. `time_series_dimension`:: preview:[] - Whether this field is used as a time series dimension. + Whether this field is used as a time series dimension on all indices. + For non-time-series indices this field is not present. `time_series_metric`:: preview:[] - Contains metric type if this fields is used as a time series metrics, absent if the field is not used as metric. + Contains the metric type if the field is used as a time series metric on all indices, absent if the field is + not used as a metric. For non-time-series indices this field is not included. `indices`:: The list of indices where this field has the same type family, or null if all indices @@ -157,12 +159,12 @@ field types are all described as the `keyword` type family. `non_dimension_indices`:: experimental:[] - If this list is present in response then some indices have the field marked as a dimension and other indices, the + If this list is present in the response, some indices have the field marked as a dimension and other indices, the ones in this list, do not. `metric_conflicts_indices`:: experimental:[] - The list of indices where this field is present if these indices don't have the same `time_series_metric` value for + The list of indices where this field is present, if these indices don't have the same `time_series_metric` value for this field. 
`meta`:: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml index ad36eaf872d57..345939c44883a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml @@ -12,6 +12,11 @@ setup: index: number_of_replicas: 0 number_of_shards: 2 + mode: time_series + routing_path: [ metricset, k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": @@ -59,6 +64,11 @@ setup: index: number_of_replicas: 0 number_of_shards: 2 + mode: time_series + routing_path: [ k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z mappings: properties: "@timestamp": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml index 4b6a376637617..458b27149fd87 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml @@ -183,8 +183,8 @@ can't shadow metrics: # Test that _tsid field is not added if an index is not a time-series index no _tsid in standard indices: - skip: - version: " - 8.0.99" - reason: _tsid support introduced in 8.1.0 + version: " - 8.4.99" + reason: time series params only on time series indices introduced in 8.5.0 - do: indices.create: @@ -209,11 +209,11 @@ no _tsid in standard indices: - match: {fields.metricset.keyword.searchable: true} - match: {fields.metricset.keyword.aggregatable: true} - - match: {fields.metricset.keyword.time_series_dimension: true} - is_false: fields.metricset.keyword.indices - is_false: fields.metricset.keyword.non_searchable_indices - is_false: fields.metricset.keyword.non_aggregatable_indices - is_false: fields._tsid # _tsid metadata field must not exist in non-time-series indices + - is_false: fields.metricset.keyword.time_series_dimension # time_series_dimension param is ignored in non-time-series indices --- no nested dimensions: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/110_field_caps.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/110_field_caps.yml new file mode 100644 index 0000000000000..7af36d5cfe7a0 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/110_field_caps.yml @@ -0,0 +1,231 @@ +setup: + - skip: + version: " - 8.4.99" + reason: metric params only on time series indexes introduced in 8.5.0 + + - do: + indices.create: + index: test_time_series + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mode: time_series + routing_path: [ metricset, k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + availability_zone: + type: short + time_series_dimension: true + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + ip: + type: ip + time_series_dimension: true + network: + 
properties: + tx: + type: long + time_series_metric: counter + rx: + type: long + time_series_metric: gauge + + - do: + indices.create: + index: test_non_time_series + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + availability_zone: + type: short + time_series_dimension: true + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + ip: + type: ip + time_series_dimension: true + network: + properties: + tx: + type: long + time_series_metric: counter + rx: + type: long + time_series_metric: gauge + +--- +field caps on time_series indices: + - skip: + version: " - 8.3.99" + reason: metric params only on time series indexes introduced in 8.4.0 + + - do: + field_caps: + index: test_time_series + fields: [ k8s.pod.uid, k8s.pod.network.rx, k8s.pod.network.tx, k8s.pod.ip, metricset, _tsid ] + + - match: { fields.k8s\.pod\.uid.keyword.type: keyword } + - match: { fields.k8s\.pod\.uid.keyword.searchable: true } + - match: { fields.k8s\.pod\.uid.keyword.aggregatable: true } + - match: { fields.k8s\.pod\.uid.keyword.time_series_dimension: true } + - is_false: fields.k8s\.pod\.uid.keyword.indices + - is_false: fields.k8s\.pod\.uid.keyword.non_searchable_indices + - is_false: fields.k8s\.pod\.uid.keyword.non_aggregatable_indices + + - match: { fields.k8s\.pod\.network\.rx.long.type: long } + - match: { fields.k8s\.pod\.network\.rx.long.searchable: true } + - match: { fields.k8s\.pod\.network\.rx.long.aggregatable: true } + - match: { fields.k8s\.pod\.network\.rx.long.time_series_metric: gauge } + - is_false: fields.k8s\.pod\.network\.tx.long.metric_conflicts_indices + - is_false: fields.k8s\.pod\.network\.rx.long.indices + - is_false: fields.k8s\.pod\.network\.rx.long.non_searchable_indices + - is_false: fields.k8s\.pod\.network\.rx.long.non_aggregatable_indices + + + - match: { fields.k8s\.pod\.network\.tx.long.type: long } + - match: { fields.k8s\.pod\.network\.tx.long.searchable: true } + - match: { fields.k8s\.pod\.network\.tx.long.aggregatable: true } + - match: { fields.k8s\.pod\.network\.tx.long.time_series_metric: counter } + - is_false: fields.k8s\.pod\.network\.tx.long.metric_conflicts_indices + - is_false: fields.k8s\.pod\.network\.tx.long.indices + - is_false: fields.k8s\.pod\.network\.tx.long.non_searchable_indices + - is_false: fields.k8s\.pod\.network\.tx.long.non_aggregatable_indices + + - match: { fields.k8s\.pod\.ip.ip.type: ip } + - match: { fields.k8s\.pod\.ip.ip.searchable: true } + - match: { fields.k8s\.pod\.ip.ip.aggregatable: true } + - is_false: fields.k8s\.pod\.ip.ip.indices + - is_false: fields.k8s\.pod\.ip.ip.non_searchable_indices + - is_false: fields.k8s\.pod\.ip.ip.non_aggregatable_indices + + - match: { fields.metricset.keyword.type: keyword } + - match: { fields.metricset.keyword.searchable: true } + - match: { fields.metricset.keyword.aggregatable: true } + - match: { fields.metricset.keyword.time_series_dimension: true } + - is_false: fields.metricset.keyword.non_dimension_indices + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + + - match: { fields._tsid._tsid.metadata_field: true } + - match: { fields._tsid._tsid.searchable: false } + - match: { fields._tsid._tsid.aggregatable: true } + - is_false: fields._tsid._tsid.indices + - is_false: 
fields._tsid._tsid.non_searchable_indices + - is_false: fields._tsid._tsid.non_aggregatable_indices + + +--- +field caps on standard indices: + - skip: + version: " - 8.3.99" + reason: metric params only on time series indexes introduced in 8.4.0 + + - do: + field_caps: + index: test_non_time_series + fields: [ _tsid, metricset, k8s.pod.network.rx, k8s.pod.network.tx, k8s.pod.network.rx ] + + - match: { fields.metricset.keyword.type: keyword } + - match: { fields.metricset.keyword.searchable: true } + - match: { fields.metricset.keyword.aggregatable: true } + - is_false: fields.metricset.keyword.time_series_dimension + - is_false: fields.metricset.keyword.non_dimension_indices + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + + - is_false: fields._tsid # _tsid metadata field must not exist in non-time-series indices + + - match: { fields.k8s\.pod\.network\.rx.long.type: long } + - match: { fields.k8s\.pod\.network\.rx.long.searchable: true } + - match: { fields.k8s\.pod\.network\.rx.long.aggregatable: true } + - is_false: fields.k8s\.pod\.network\.rx.long.time_series_metric + - is_false: fields.k8s\.pod\.network\.rx.long.metric_conflicts_indices + - is_false: fields.k8s\.pod\.network\.rx.long.indices + - is_false: fields.k8s\.pod\.network\.rx.long.non_searchable_indices + - is_false: fields.k8s\.pod\.network\.rx.long.non_aggregatable_indices + - is_false: fields.k8s\.pod\.network\.rx.gauge + + - match: { fields.k8s\.pod\.network\.tx.long.type: long } + - match: { fields.k8s\.pod\.network\.tx.long.searchable: true } + - match: { fields.k8s\.pod\.network\.tx.long.aggregatable: true } + - is_false: fields.k8s\.pod\.network\.tx.long.time_series_metric + - is_false: fields.k8s\.pod\.network\.tx.long.metric_conflicts_indices + - is_false: fields.k8s\.pod\.network\.tx.long.indices + - is_false: fields.k8s\.pod\.network\.tx.long.non_searchable_indices + - is_false: fields.k8s\.pod\.network\.tx.long.non_aggregatable_indices + - is_false: fields.k8s\.pod\.network\.tx.counter + + +--- +field caps on mixed indices: + - skip: + version: " - 8.3.99" + reason: metric params only on time series indexes introduced in 8.4.0 + + - do: + field_caps: + index: test_* + fields: [ metricset, k8s.pod.availability_zone, k8s.pod.network.tx, k8s.pod.network.rx ] + + - match: { fields.metricset.keyword.type: keyword } + - match: { fields.metricset.keyword.searchable: true } + - match: { fields.metricset.keyword.aggregatable: true } + - is_false: fields.metricset.keyword.time_series_dimension + - match: { fields.metricset.keyword.non_dimension_indices: [ "test_non_time_series" ] } + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + + - match: { fields.k8s\.pod\.network\.rx.long.type: long } + - match: { fields.k8s\.pod\.network\.rx.long.searchable: true } + - match: { fields.k8s\.pod\.network\.rx.long.aggregatable: true } + - match: { fields.k8s\.pod\.network\.rx.long.metric_conflicts_indices: [ "test_non_time_series", "test_time_series" ] } + - is_false: fields.k8s\.pod\.network\.rx.long.time_series_metric + - is_false: fields.k8s\.pod\.network\.rx.long.non_searchable_indices + - is_false: fields.k8s\.pod\.network\.rx.long.non_aggregatable_indices + - is_false: fields.k8s\.pod\.network\.rx.long.indices + + - match: { fields.k8s\.pod\.network\.tx.long.type: long } + - match: 
{ fields.k8s\.pod\.network\.tx.long.searchable: true } + - match: { fields.k8s\.pod\.network\.tx.long.aggregatable: true } + - match: { fields.k8s\.pod\.network\.tx.long.metric_conflicts_indices: [ "test_non_time_series", "test_time_series" ] } + - is_false: fields.k8s\.pod\.network\.tx.long.time_series_metric + - is_false: fields.k8s\.pod\.network\.tx.long.non_searchable_indices + - is_false: fields.k8s\.pod\.network\.tx.long.non_aggregatable_indices + - is_false: fields.k8s\.pod\.network\.tx.long.indices + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml index 51e6125558da3..48f650a05d6f9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml @@ -304,45 +304,6 @@ aggregate a tag: term: _tsid: wont't work ---- -field capabilities: - - skip: - version: " - 8.1.99" - reason: tsdb indexing changed in 8.2.0 - - - do: - field_caps: - index: test - fields: [k8s.pod.uid, k8s.pod.network.rx, k8s.pod.ip, metricset, _tsid] - - - match: {fields.k8s\.pod\.uid.keyword.searchable: true} - - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} - - match: {fields.k8s\.pod\.uid.keyword.time_series_dimension: true} - - is_false: fields.k8s\.pod\.uid.keyword.indices - - is_false: fields.k8s\.pod\.uid.keyword.non_searchable_indices - - is_false: fields.k8s\.pod\.uid.keyword.non_aggregatable_indices - - match: {fields.k8s\.pod\.network\.rx.long.searchable: true} - - match: {fields.k8s\.pod\.network\.rx.long.aggregatable: true} - - is_false: fields.k8s\.pod\.network\.rx.long.indices - - is_false: fields.k8s\.pod\.network\.rx.long.non_searchable_indices - - is_false: fields.k8s\.pod\.network\.rx.long.non_aggregatable_indices - - match: {fields.k8s\.pod\.ip.ip.searchable: true} - - match: {fields.k8s\.pod\.ip.ip.aggregatable: true} - - is_false: fields.k8s\.pod\.ip.ip.indices - - is_false: fields.k8s\.pod\.ip.ip.non_searchable_indices - - is_false: fields.k8s\.pod\.ip.ip.non_aggregatable_indices - - match: {fields.metricset.keyword.searchable: true} - - match: {fields.metricset.keyword.aggregatable: true} - - match: {fields.metricset.keyword.time_series_dimension: true} - - is_false: fields.metricset.keyword.indices - - is_false: fields.metricset.keyword.non_searchable_indices - - is_false: fields.metricset.keyword.non_aggregatable_indices - - match: {fields._tsid._tsid.metadata_field: true} - - match: {fields._tsid._tsid.searchable: false} - - match: {fields._tsid._tsid.aggregatable: true} - - is_false: fields._tsid._tsid.indices - - is_false: fields._tsid._tsid.non_searchable_indices - - is_false: fields._tsid._tsid.non_aggregatable_indices --- sort by tsid: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index a52940ae9a413..f54c5b529caaa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -24,7 +24,9 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; import 
org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -119,7 +121,14 @@ public void setUp() throws Exception { .endObject() .endObject() .endObject(); - assertAcked(prepareCreate("old_index").setMapping(oldIndexMapping)); + + Settings settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("some_dimension")) + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2006-01-08T23:40:53.384Z") + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z") + .build(); + assertAcked(prepareCreate("old_index").setSettings(settings).setMapping(oldIndexMapping)); XContentBuilder newIndexMapping = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java index 9fa91ac09ef4d..34861d4240da0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesMetricsIT.java @@ -12,12 +12,14 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; @@ -29,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -80,7 +83,9 @@ public void assertSmallSimple(Object d1, Object d2, CheckedConsumer> gen) throws I expectedLatest = expectedLatest.entry(dimensions, List.of(Map.entry(max, value))); expectedValues = expectedValues.entry(dimensions, expectedValuesForTimeSeries); } - indexRandom(true, docs); + indexRandom(true, false, docs); assertMap(latest(iterationSize, TimeValue.timeValueMillis(maxMillis - minMillis), maxMillis), expectedLatest); assertMap( range( @@ -328,7 +334,7 @@ public void testManySteps() throws InterruptedException, ExecutionException, IOE expectedValues.add(Map.entry(timestamp, v)); docs.add(client().prepareIndex("tsdb").setSource(Map.of("@timestamp", timestamp, "dim", "dim", "v", v))); } - indexRandom(true, docs); + indexRandom(true, false, docs); assertMap( range( iterationBuckets, @@ -356,10 +362,11 @@ private void createTsdbIndex(String... 
keywordDimensions) throws IOException { for (String d : keywordDimensions) { mapping.startObject(d).field("type", "keyword").field("time_series_dimension", true).endObject(); } - }); + }, Arrays.asList(keywordDimensions)); } - private void createTsdbIndex(CheckedConsumer dimensionMapping) throws IOException { + private void createTsdbIndex(CheckedConsumer dimensionMapping, List routingDims) + throws IOException { XContentBuilder mapping = JsonXContent.contentBuilder(); mapping.startObject().startObject("properties"); mapping.startObject("@timestamp").field("type", "date").endObject(); @@ -367,7 +374,14 @@ private void createTsdbIndex(CheckedConsumer dimen mapping.startObject("m").field("type", "double").field("time_series_metric", "gauge").endObject(); dimensionMapping.accept(mapping); mapping.endObject().endObject(); - client().admin().indices().prepareCreate("tsdb").setMapping(mapping).get(); + + Settings settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingDims) + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-08T23:40:53.384Z") + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z") + .build(); + client().admin().indices().prepareCreate("tsdb").setSettings(settings).setMapping(mapping).get(); } private Map>, List>> latest( diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index 08486b6a4aca3..fdac89eec7a4e 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -110,18 +110,23 @@ static Map retrieveFieldCaps( boolean includeParentObjects = checkIncludeParents(filters); Predicate filter = buildFilter(indexFieldfilter, filters, types, context); + boolean isTimeSeriesIndex = context.getIndexSettings().getTimestampBounds() != null; Map responseMap = new HashMap<>(); for (String field : fieldNames) { MappedFieldType ft = context.getFieldType(field); if (filter.test(ft)) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, - ft.familyTypeName(), + // This is a nasty hack so that we expose aggregate_metric_double field, + // when the index is a time series index and the field is marked as metric. + // This code should be reverted once PR https://github.com/elastic/elasticsearch/pull/87849 + // is merged. + isTimeSeriesIndex && ft.getMetricType() != null ? ft.typeName() : ft.familyTypeName(), context.isMetadataField(field), ft.isSearchable(), ft.isAggregatable(), - ft.isDimension(), - ft.getMetricType(), + isTimeSeriesIndex ? ft.isDimension() : false, + isTimeSeriesIndex ? 
ft.getMetricType() : null, ft.meta() ); responseMap.put(field, fieldCap); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java index 674a8e7da8456..383f5f2a0d4f2 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java @@ -52,7 +52,6 @@ public class IndexFieldCapabilities implements Writeable { TimeSeriesParams.MetricType metricType, Map meta ) { - this.name = name; this.type = type; this.isMetadatafield = isMetadatafield; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml new file mode 100644 index 0000000000000..29fc5226c4cda --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml @@ -0,0 +1,159 @@ +setup: + - skip: + version: " - 8.4.99" + reason: metric params only on time series indices introduced in 8.5.0 + + - do: + indices.create: + index: test_rollup + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mode: time_series + routing_path: [ metricset, k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + metric: + type: aggregate_metric_double + metrics: [ min, max, sum, value_count ] + default_metric: max + time_series_metric: gauge + + - do: + indices.create: + index: test_time_series + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mode: time_series + routing_path: [ metricset, k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + metric: + type: double + time_series_metric: gauge + + - do: + indices.create: + index: test_non_time_series + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + metric: + type: double + time_series_metric: gauge + + +--- +# Test field_caps on a rollup index +field caps on rollup indices: + - skip: + version: " - 8.4.99" + reason: metric params only on time series indices introduced in 8.5.0 + + - do: + field_caps: + index: test_rollup + fields: [ metric ] + + - match: { fields.metric.aggregate_metric_double.type: aggregate_metric_double } + - match: { fields.metric.aggregate_metric_double.searchable: true } + - match: { fields.metric.aggregate_metric_double.aggregatable: true } + - match: { fields.metric.aggregate_metric_double.time_series_metric: gauge } + - is_false: fields.metric.aggregate_metric_double.indices + - is_false: fields.metric.aggregate_metric_double.non_searchable_indices + - is_false: fields.metric.aggregate_metric_double.non_aggregatable_indices + - is_false: fields.metric.aggregate_metric_double.metric_conflicts_indices + - is_false: fields.metric.double + + +--- +# Test field_caps on time-series index (mix of raw and rollup indices) +field caps on time series indices: + - skip: + version: " - 8.4.99" + reason: metric params only on time series indices introduced in 
8.5.0 + + - do: + field_caps: + index: [ test_time_series, test_rollup ] + fields: [ metric ] + + - match: { fields.metric.double.type: double } + - match: { fields.metric.double.searchable: true } + - match: { fields.metric.double.aggregatable: true } + - match: { fields.metric.double.time_series_metric: gauge } + - match: { fields.metric.double.indices: [ "test_time_series" ] } + - is_false: fields.metric.double.non_searchable_indices + - is_false: fields.metric.double.non_aggregatable_indices + - is_false: fields.metric.double.metric_conflicts_indices + + - match: { fields.metric.aggregate_metric_double.type: aggregate_metric_double } + - match: { fields.metric.aggregate_metric_double.searchable: true } + - match: { fields.metric.aggregate_metric_double.aggregatable: true } + - match: { fields.metric.aggregate_metric_double.time_series_metric: gauge } + - match: { fields.metric.aggregate_metric_double.indices: [ "test_rollup" ] } + + - is_false: fields.metric.aggregate_metric_double.non_searchable_indices + - is_false: fields.metric.aggregate_metric_double.non_aggregatable_indices + - is_false: fields.metric.aggregate_metric_double.metric_conflicts_indices + +--- +# Test field_caps on mixed standard and time-series (mix of raw and rollup) indices +field caps on all indices: + - skip: + version: " - 8.4.99" + reason: metric params only on time series indices introduced in 8.5.0 + + - do: + field_caps: + index: [ test_time_series, test_rollup, test_non_time_series ] + fields: [ metric ] + + - match: { fields.metric.double.type: double } + - match: { fields.metric.double.searchable: true } + - match: { fields.metric.double.aggregatable: true } + - match: { fields.metric.double.indices: [ "test_non_time_series", "test_time_series" ] } + - match: { fields.metric.double.metric_conflicts_indices: [ "test_non_time_series", "test_time_series" ] } + - is_false: fields.metric.double.non_searchable_indices + - is_false: fields.metric.double.non_aggregatable_indices + - is_false: fields.metric.double.time_series_metric + + - match: { fields.metric.aggregate_metric_double.type: aggregate_metric_double } + - match: { fields.metric.aggregate_metric_double.searchable: true } + - match: { fields.metric.aggregate_metric_double.aggregatable: true } + - match: { fields.metric.aggregate_metric_double.time_series_metric: gauge } + - match: { fields.metric.aggregate_metric_double.indices: [ "test_rollup" ] } + + - is_false: fields.metric.aggregate_metric_double.non_searchable_indices + - is_false: fields.metric.aggregate_metric_double.non_aggregatable_indices + - is_false: fields.metric.aggregate_metric_double.metric_conflicts_indices From f28f4545b24e7912a8623f5f3da9147f28e7d7ad Mon Sep 17 00:00:00 2001 From: likzn <1020193211@qq.com> Date: Fri, 5 Aug 2022 01:44:50 +0800 Subject: [PATCH 112/265] In the field capabilities API, re-add support for `fields` in the request body (#88972) We previously removed support for `fields` in the request body, to ensure there was only one way to specify the parameter. We've now decided to undo the change, since it was disruptive and the request body is actually the best place to pass variable-length data like `fields`. This PR restores support for `fields` in the request body. It throws an error if the parameter is specified both in the URL and the body. 
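As an illustration (the index and field names below are invented for the example, not taken from this change), a request of this shape is accepted again once this lands:

```
POST /my-index/_field_caps
{
  "fields": [ "title", "rating*" ]
}
```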
Closes #86875 --- CHANGELOG.md | 2 +- docs/changelog/88972.yaml | 6 +++ .../test/field_caps/10_basic.yml | 16 +++++- .../action/RestFieldCapabilitiesAction.java | 20 +++++-- .../FieldCapabilitiesRequestTests.java | 17 ++++++ .../RestFieldCapabilitiesActionTests.java | 54 +++++++++++++++++++ 6 files changed, 109 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/88972.yaml create mode 100644 server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a965ee4b6eb0..769855531e35c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,3 @@ -# Elasticsearch Changlog +# Elasticsearch Changelog Please see the [release notes](https://www.elastic.co/guide/en/elasticsearch/reference/current/es-release-notes.html) in the reference manual. diff --git a/docs/changelog/88972.yaml b/docs/changelog/88972.yaml new file mode 100644 index 0000000000000..9f50a933ec453 --- /dev/null +++ b/docs/changelog/88972.yaml @@ -0,0 +1,6 @@ +pr: 88972 +summary: In the field capabilities API, re-add support for fields in the request body +area: Search +type: enhancement +issues: + - 86875 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml index e1817c9c607ef..ad641f83b47ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/10_basic.yml @@ -121,7 +121,6 @@ setup: --- "Get simple field caps": - - do: field_caps: index: 'test1,test2,test3' @@ -162,6 +161,21 @@ setup: - match: {fields.geo.keyword.indices: ["test3"]} - is_false: fields.geo.keyword.non_searchable_indices - is_false: fields.geo.keyword.on_aggregatable_indices + +--- +"Get field caps with fields in body": + - skip: + version: " - 8.4.99" + reason: re-added support for fields in the request body in 8.5 + - do: + field_caps: + index: 'test1,test2,test3' + body: + fields: [text] + + - match: {fields.text.text.searchable: true} + - match: {fields.text.text.aggregatable: false} + - is_false: fields.keyword --- "Get date_nanos field caps": diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java index 879faff87e0b6..2ddc1a106dbc3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java @@ -23,6 +23,7 @@ import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xcontent.ObjectParser.fromList; public class RestFieldCapabilitiesAction extends BaseRestHandler { @@ -43,10 +44,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - FieldCapabilitiesRequest fieldRequest = new FieldCapabilitiesRequest().fields( - Strings.splitStringByCommaToArray(request.param("fields")) - ).indices(indices); + final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); + final FieldCapabilitiesRequest fieldRequest = 
new FieldCapabilitiesRequest(); + fieldRequest.indices(indices); fieldRequest.indicesOptions(IndicesOptions.fromRequest(request, fieldRequest.indicesOptions())); fieldRequest.includeUnmapped(request.paramAsBoolean("include_unmapped", false)); @@ -57,16 +57,28 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC PARSER.parse(parser, fieldRequest, null); } }); + if (request.hasParam("fields")) { + if (fieldRequest.fields().length > 0) { + throw new IllegalArgumentException( + "can't specify a request body and [fields]" + + " request parameter, either specify a request body or the" + + " [fields] request parameter" + ); + } + fieldRequest.fields(Strings.splitStringByCommaToArray(request.param("fields"))); + } return channel -> client.fieldCaps(fieldRequest, new RestToXContentListener<>(channel)); } private static ParseField INDEX_FILTER_FIELD = new ParseField("index_filter"); private static ParseField RUNTIME_MAPPINGS_FIELD = new ParseField("runtime_mappings"); + private static ParseField FIELDS_FIELD = new ParseField("fields"); private static final ObjectParser PARSER = new ObjectParser<>("field_caps_request"); static { PARSER.declareObject(FieldCapabilitiesRequest::indexFilter, (p, c) -> parseInnerQueryBuilder(p), INDEX_FILTER_FIELD); PARSER.declareObject(FieldCapabilitiesRequest::runtimeFields, (p, c) -> p.map(), RUNTIME_MAPPINGS_FIELD); + PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), FIELDS_FIELD); } } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java index 2f94d8cd8e3f4..3af2639538f0d 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -19,10 +19,14 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; import java.util.ArrayList; @@ -31,6 +35,7 @@ import java.util.function.Consumer; import static java.util.Collections.singletonMap; +import static org.elasticsearch.xcontent.ObjectParser.fromList; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -146,6 +151,18 @@ public void testToXContent() throws IOException { }""").replaceAll("\\s+", ""), xContent); } + public void testFromXContent() throws IOException { + XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"fields\" : [\"FOO\"] }"); + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + ObjectParser PARSER = new ObjectParser<>("field_caps_request"); + PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), new ParseField("fields")); + + PARSER.parse(parser, request, null); + + assertArrayEquals(request.fields(), new String[] { "FOO" }); + + } + public void testValidation() { FieldCapabilitiesRequest 
request = new FieldCapabilitiesRequest().indices("index2"); ActionRequestValidationException exception = request.validate(); diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java new file mode 100644 index 0000000000000..0b98a68c1dbce --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; + +import static org.mockito.Mockito.mock; + +public class RestFieldCapabilitiesActionTests extends ESTestCase { + + private RestFieldCapabilitiesAction action; + + @Before + public void setUpAction() { + action = new RestFieldCapabilitiesAction(); + } + + public void testRequestBodyAndParamsBothInput() throws IOException { + String content = "{ \"fields\": [\"title\"] }"; + HashMap paramsMap = new HashMap<>(); + paramsMap.put("fields", "title"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_field_caps") + .withParams(paramsMap) + .withContent(new BytesArray(content), XContentType.JSON) + .build(); + try { + action.prepareRequest(request, mock(NodeClient.class)); + fail("expected failure"); + } catch (IllegalArgumentException e) { + assertEquals( + e.getMessage(), + "can't specify a request body and [fields]" + + " request parameter, either specify a request body or the" + + " [fields] request parameter" + ); + } + + } +} From e3c33e2acd2c7e8e4bff6027f2bbc1b288e1b72d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 4 Aug 2022 14:05:09 -0400 Subject: [PATCH 113/265] Deduplicate fetching doc-values fields (#89094) If a docvalues field matches multiple field patterns, then ES will return the value of that doc-values field multiple times. Like fetching fields from source, we should deduplicate the matching doc-values fields. 
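As a sketch of the problem being fixed (index and field names invented for the example), a request whose doc-value patterns overlap, such as:

```
POST /my-index/_search
{
  "docvalue_fields": [
    { "field": "date*" },
    { "field": "date", "format": "epoch_millis" }
  ]
}
```

previously fetched and returned the `date` doc values once per matching entry; after this change the patterns are resolved into a map keyed by the concrete field name, so each field is fetched once and the last matching entry (including its `format`) wins.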
--- docs/changelog/89094.yaml | 5 ++ rest-api-spec/build.gradle | 1 + .../test/search/240_date_nanos.yml | 36 ++++++++++- .../search/fields/SearchFieldsIT.java | 59 ++++++++++--------- .../fetch/subphase/FetchDocValuesContext.java | 14 +++-- .../search/SearchServiceTests.java | 43 ++++++++++++++ 6 files changed, 123 insertions(+), 35 deletions(-) create mode 100644 docs/changelog/89094.yaml diff --git a/docs/changelog/89094.yaml b/docs/changelog/89094.yaml new file mode 100644 index 0000000000000..ec0ca105c5a53 --- /dev/null +++ b/docs/changelog/89094.yaml @@ -0,0 +1,5 @@ +pr: 89094 +summary: Deduplicate fetching doc-values fields +area: Search +type: bug +issues: [] diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index b0b327a9f4ff2..4d8e8bc6f19c7 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -80,6 +80,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search.aggregation/20_terms/numeric profiler", "The profiler results aren't backwards compatible.") task.skipTest("migration/10_get_feature_upgrade_status/Get feature upgrade status", "Awaits backport") task.skipTest("search/330_fetch_fields/Test disable source", "Error no longer thrown") + task.skipTest("search/240_date_nanos/doc value fields are working as expected across date and date_nanos fields", "Fetching docvalues field multiple times is no longer allowed") task.replaceValueInMatch("_type", "_doc") task.addAllowedWarningRegex("\\[types removal\\].*") diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml index 007f7f7f0e88e..95f1ee6cd9f38 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -91,8 +91,40 @@ setup: docvalue_fields: - field: date format: strict_date_optional_time + sort: + - date: desc + + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "date_ns_1" } + - match: { hits.hits.1._id: "date_ms_1" } + - match: { hits.hits.0.fields.date: [ "2018-10-29T12:12:12.123Z"] } + - match: { hits.hits.1.fields.date: [ "2018-10-29T12:12:12.987Z"] } + + - do: + search: + rest_total_hits_as_int: true + index: date* + body: + docvalue_fields: - field: date format: epoch_millis + sort: + - date: desc + + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "date_ns_1" } + - match: { hits.hits.1._id: "date_ms_1" } + - match: { hits.hits.0.fields.date: [ "1540815132123.456789" ] } + - match: { hits.hits.1.fields.date: [ "1540815132987" ] } + + - do: + search: + rest_total_hits_as_int: true + index: date* + body: + docvalue_fields: - field: date format: uuuu-MM-dd'T'HH:mm:ss.SSSSSSSSSX sort: @@ -102,8 +134,8 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._id: "date_ns_1" } - match: { hits.hits.1._id: "date_ms_1" } - - match: { hits.hits.0.fields.date: [ "2018-10-29T12:12:12.123Z", "1540815132123.456789", "2018-10-29T12:12:12.123456789Z" ] } - - match: { hits.hits.1.fields.date: [ "2018-10-29T12:12:12.987Z", "1540815132987", "2018-10-29T12:12:12.987000000Z" ] } + - match: { hits.hits.0.fields.date: [ "2018-10-29T12:12:12.123456789Z" ] } + - match: { hits.hits.1.fields.date: [ "2018-10-29T12:12:12.987000000Z" ] } --- "date histogram aggregation with date and date_nanos mapping": diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index bedee045b6816..317a9e5304f1e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -866,6 +866,9 @@ public void testDocValueFields() throws Exception { .addDocValueField("boolean_field") .addDocValueField("binary_field") .addDocValueField("ip_field"); + if (randomBoolean()) { + builder.addDocValueField("*_field"); + } SearchResponse searchResponse = builder.get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -891,21 +894,21 @@ public void testDocValueFields() throws Exception { ) ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); assertThat( searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), equalTo(DateFormatter.forPattern("date_optional_time").format(date)) ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ=")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); builder = 
client().prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"); searchResponse = builder.get(); @@ -933,21 +936,21 @@ public void testDocValueFields() throws Exception { ) ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); assertThat( searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), equalTo(DateFormatter.forPattern("date_optional_time").format(date)) ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ=")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -968,12 +971,12 @@ public void testDocValueFields() throws Exception { equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field")) ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue(), equalTo("1.0")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue(), equalTo("2.0")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo("3.0")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo("4.0")); - 
assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo("5.0")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo("6.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of("1.0"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of("2.0"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of("3.0"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of("4.0"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of("5.0"))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of("6.0"))); assertThat( searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), equalTo(DateFormatter.forPattern("epoch_millis").format(date)) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java index a2a58ca7be53c..2ae7f6d07bbb9 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java @@ -10,9 +10,10 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.SearchExecutionContext; -import java.util.ArrayList; import java.util.Collection; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; /** * All the required context to pull a field from the doc values. @@ -23,20 +24,23 @@ */ public class FetchDocValuesContext { - private final List fields = new ArrayList<>(); + private final Collection fields; /** * Create a new FetchDocValuesContext using the provided input list. * Field patterns containing wildcards are resolved and unmapped fields are filtered out. */ public FetchDocValuesContext(SearchExecutionContext searchExecutionContext, List fieldPatterns) { + // Use Linked HashMap to reserve the fetching order + final Map fieldToFormats = new LinkedHashMap<>(); for (FieldAndFormat field : fieldPatterns) { Collection fieldNames = searchExecutionContext.getMatchingFieldNames(field.field); for (String fieldName : fieldNames) { - fields.add(new FieldAndFormat(fieldName, field.format, field.includeUnmapped)); + // the last matching field wins + fieldToFormats.put(fieldName, new FieldAndFormat(fieldName, field.format, field.includeUnmapped)); } } - + this.fields = fieldToFormats.values(); int maxAllowedDocvalueFields = searchExecutionContext.getIndexSettings().getMaxDocvalueFields(); if (fields.size() > maxAllowedDocvalueFields) { throw new IllegalArgumentException( @@ -54,7 +58,7 @@ public FetchDocValuesContext(SearchExecutionContext searchExecutionContext, List /** * Returns the required docvalue fields. 
*/ - public List fields() { + public Collection fields() { return this.fields; } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 8baa9de263463..36366fd30c15a 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -79,6 +79,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; @@ -126,6 +127,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; @@ -592,6 +594,47 @@ public void testMaxDocvalueFieldsSearch() throws IOException { } } + public void testDeduplicateDocValuesFields() throws Exception { + createIndex("index", Settings.EMPTY, "_doc", "field1", "type=date", "field2", "type=date"); + client().prepareIndex("index") + .setId("1") + .setSource("field1", "2022-08-03", "field2", "2022-08-04") + .setRefreshPolicy(IMMEDIATE) + .get(); + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + searchSourceBuilder.docValueField("f*"); + if (randomBoolean()) { + searchSourceBuilder.docValueField("field*"); + } + if (randomBoolean()) { + searchSourceBuilder.docValueField("*2"); + } + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, + -1, + null + ); + try (SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), randomBoolean())) { + Collection fields = context.docValuesContext().fields(); + assertThat(fields, containsInAnyOrder(new FieldAndFormat("field1", null), new FieldAndFormat("field2", null))); + } + } + } + /** * test that getting more than the allowed number of script_fields throws an exception */ From d588d456f07e62b2e0a88c03419c99090c78b7d0 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Thu, 4 Aug 2022 14:45:15 -0400 Subject: [PATCH 114/265] [ML] add new trained model deployment cache clear API (#89074) This adds a new `_ml/trained_models//deployment/cache/_clear` API. This will clear the inference cache on every node where the model is allocated. 
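A minimal usage sketch (the model id is a placeholder):

```
POST _ml/trained_models/my_model/deployment/cache/_clear
```

The transport action only fans the request out to nodes that currently hold a routable allocation of the deployment; if there are none it answers `{"cleared": true}` immediately, and any per-node failure is surfaced as the overall error.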
--- docs/changelog/89074.yaml | 5 + ...ar-trained-model-deployment-cache.asciidoc | 57 ++++++++++ .../ml/trained-models/apis/index.asciidoc | 2 + ....clear_trained_model_deployment_cache.json | 31 ++++++ .../ml/action/ClearDeploymentCacheAction.java | 102 ++++++++++++++++++ ...learDeploymentCacheActionRequestTests.java | 23 ++++ ...earDeploymentCacheActionResponseTests.java | 23 ++++ .../xpack/ml/MachineLearning.java | 5 + .../TransportClearDeploymentCacheAction.java | 97 +++++++++++++++++ .../TrainedModelAssignmentNodeService.java | 8 +- ... AbstractControlMessagePyTorchAction.java} | 35 +++--- ...ClearCacheControlMessagePytorchAction.java | 43 ++++++++ .../deployment/DeploymentManager.java | 23 +++- ...adSettingsControlMessagePytorchAction.java | 49 +++++++++ .../TrainedModelDeploymentTask.java | 4 + .../RestClearDeploymentCacheAction.java | 47 ++++++++ ...ingsControlMessagePytorchActionTests.java} | 25 +++-- .../xpack/security/operator/Constants.java | 1 + .../test/ml/3rd_party_deployment.yml | 86 +++++++++++++++ 19 files changed, 638 insertions(+), 28 deletions(-) create mode 100644 docs/changelog/89074.yaml create mode 100644 docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheAction.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionRequestTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionResponseTests.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/{ControlMessagePyTorchAction.java => AbstractControlMessagePyTorchAction.java} (75%) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ClearCacheControlMessagePytorchAction.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchAction.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestClearDeploymentCacheAction.java rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/{ControlMessagePyTorchActionTests.java => ThreadSettingsControlMessagePytorchActionTests.java} (84%) diff --git a/docs/changelog/89074.yaml b/docs/changelog/89074.yaml new file mode 100644 index 0000000000000..f1a5b1e716b82 --- /dev/null +++ b/docs/changelog/89074.yaml @@ -0,0 +1,5 @@ +pr: 89074 +summary: Add new trained model deployment cache clear API +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc new file mode 100644 index 0000000000000..e5be3d57d5a46 --- /dev/null +++ b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc @@ -0,0 +1,57 @@ +[role="xpack"] +[[clear-trained-model-deployment-cache]] += Clear trained model deployment cache API +[subs="attributes"] +++++ +Clear trained model deployment cache +++++ + +Clears a trained model deployment cache on all nodes where the 
trained model is assigned. + +preview::[] + +[[clear-trained-model-deployment-cache-request]] +== {api-request-title} + +`POST _ml/trained_models//deployment/cache/_clear` + +[[clear-trained-model-deployment-cache-prereq]] +== {api-prereq-title} + +Requires the `manage_ml` cluster privilege. This privilege is included in the +`machine_learning_admin` built-in role. + +[[clear-trained-model-deployment-cache-desc]] +== {api-description-title} + +A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, +their responses may be cached on that individual node. Calling this API clears the caches without restarting the +deployment. + +[[clear-trained-model-deployment-cache-path-params]] +== {api-path-parms-title} + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] + +[[clear-trained-model-deployment-cache-example]] +== {api-examples-title} + +The following example clears the cache for the new deployment for the +`elastic__distilbert-base-uncased-finetuned-conll03-english` trained model: + +[source,console] +-------------------------------------------------- +POST _ml/trained_models/elastic__distilbert-base-uncased-finetuned-conll03-english/deployment/cache/_clear +-------------------------------------------------- +// TEST[skip:TBD] + +The API returns the following results: + +[source,console-result] +---- +{ + "cleared": true +} +---- diff --git a/docs/reference/ml/trained-models/apis/index.asciidoc b/docs/reference/ml/trained-models/apis/index.asciidoc index 148e9bb038ef4..b15aad6854df5 100644 --- a/docs/reference/ml/trained-models/apis/index.asciidoc +++ b/docs/reference/ml/trained-models/apis/index.asciidoc @@ -12,6 +12,8 @@ include::get-trained-models.asciidoc[leveloffset=+2] include::get-trained-models-stats.asciidoc[leveloffset=+2] //INFER include::infer-trained-model.asciidoc[leveloffset=+2][leveloffset=+2] +//UPDATE +include::clear-trained-model-deployment-cache.asciidoc[leveloffset=+2] //START/STOP include::start-trained-model-deployment.asciidoc[leveloffset=+2] include::stop-trained-model-deployment.asciidoc[leveloffset=+2] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json new file mode 100644 index 0000000000000..67998ae3085a3 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json @@ -0,0 +1,31 @@ +{ + "ml.clear_trained_model_deployment_cache":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html", + "description":"Clear the cached results from a trained model deployment" + }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ml/trained_models/{model_id}/deployment/cache/_clear", + "methods":[ + "POST" + ], + "parts":{ + "model_id":{ + "type":"string", + "description":"The unique identifier of the trained model.", + "required":true + } + } + } + ] + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheAction.java new file mode 100644 index 0000000000000..c028bed1d8b49 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheAction.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; + +public class ClearDeploymentCacheAction extends ActionType { + public static final ClearDeploymentCacheAction INSTANCE = new ClearDeploymentCacheAction(); + public static final String NAME = "cluster:admin/xpack/ml/trained_models/deployment/clear_cache"; + + private ClearDeploymentCacheAction() { + super(NAME, Response::new); + } + + public static class Request extends BaseTasksRequest { + private final String deploymentId; + + public Request(String deploymentId) { + this.deploymentId = ExceptionsHelper.requireNonNull(deploymentId, "deployment_id"); + } + + public Request(StreamInput in) throws IOException { + super(in); + this.deploymentId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(deploymentId); + } + + public String getDeploymentId() { + return deploymentId; + } + + @Override + public boolean match(Task task) { + return StartTrainedModelDeploymentAction.TaskMatcher.match(task, deploymentId); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(deploymentId, request.deploymentId); + } + + @Override + public int hashCode() { + return Objects.hash(deploymentId); + } + } + + public static class Response extends BaseTasksResponse implements ToXContentObject { + + private final boolean cleared; + + public Response(boolean cleared) { + super(Collections.emptyList(), Collections.emptyList()); + this.cleared = cleared; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.cleared = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(cleared); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("cleared", cleared); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionRequestTests.java new file mode 100644 index 0000000000000..b6b6ec36da491 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionRequestTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class ClearDeploymentCacheActionRequestTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return ClearDeploymentCacheAction.Request::new; + } + + @Override + protected ClearDeploymentCacheAction.Request createTestInstance() { + return new ClearDeploymentCacheAction.Request(randomAlphaOfLength(5)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionResponseTests.java new file mode 100644 index 0000000000000..90455b406952a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ClearDeploymentCacheActionResponseTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class ClearDeploymentCacheActionResponseTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return ClearDeploymentCacheAction.Response::new; + } + + @Override + protected ClearDeploymentCacheAction.Response createTestInstance() { + return new ClearDeploymentCacheAction.Response(randomBoolean()); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 6f860f370962d..0467d174f8616 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -92,6 +92,7 @@ import org.elasticsearch.xpack.core.ml.MlStatsIndex; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CancelJobModelSnapshotUpgradeAction; +import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; @@ -189,6 +190,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.template.TemplateUtils; import org.elasticsearch.xpack.ml.action.TransportCancelJobModelSnapshotUpgradeAction; +import org.elasticsearch.xpack.ml.action.TransportClearDeploymentCacheAction; import org.elasticsearch.xpack.ml.action.TransportCloseJobAction; import org.elasticsearch.xpack.ml.action.TransportCreateTrainedModelAssignmentAction; import 
org.elasticsearch.xpack.ml.action.TransportDeleteCalendarAction; @@ -391,6 +393,7 @@ import org.elasticsearch.xpack.ml.rest.filter.RestGetFiltersAction; import org.elasticsearch.xpack.ml.rest.filter.RestPutFilterAction; import org.elasticsearch.xpack.ml.rest.filter.RestUpdateFilterAction; +import org.elasticsearch.xpack.ml.rest.inference.RestClearDeploymentCacheAction; import org.elasticsearch.xpack.ml.rest.inference.RestDeleteTrainedModelAction; import org.elasticsearch.xpack.ml.rest.inference.RestDeleteTrainedModelAliasAction; import org.elasticsearch.xpack.ml.rest.inference.RestGetTrainedModelsAction; @@ -1254,6 +1257,7 @@ public List getRestHandlers( new RestPutTrainedModelDefinitionPartAction(), new RestPutTrainedModelVocabularyAction(), new RestInferTrainedModelAction(), + new RestClearDeploymentCacheAction(), // CAT Handlers new RestCatJobsAction(), new RestCatTrainedModelsAction(), @@ -1358,6 +1362,7 @@ public List getRestHandlers( UpdateTrainedModelAssignmentRoutingInfoAction.INSTANCE, TransportUpdateTrainedModelAssignmentStateAction.class ), + new ActionHandler<>(ClearDeploymentCacheAction.INSTANCE, TransportClearDeploymentCacheAction.class), usageAction, infoAction ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java new file mode 100644 index 0000000000000..36f56aa0b390a --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction; +import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction.Request; +import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction.Response; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.ExceptionsHelper.convertToElastic; + +public class TransportClearDeploymentCacheAction extends TransportTasksAction { + + @Inject + public TransportClearDeploymentCacheAction( + TransportService transportService, + ActionFilters actionFilters, + ClusterService clusterService + ) { + super( + ClearDeploymentCacheAction.NAME, + clusterService, + transportService, + actionFilters, + Request::new, + Response::new, + Response::new, + ThreadPool.Names.SAME + ); + } + + @Override + protected Response newResponse( + Request request, + List taskResponse, + List taskOperationFailures, + List failedNodeExceptions + ) { + if (taskOperationFailures.isEmpty() == false) { + throw convertToElastic(taskOperationFailures.get(0).getCause()); + } else if (failedNodeExceptions.isEmpty() == false) { + throw convertToElastic(failedNodeExceptions.get(0)); + } + return new Response(true); + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + final ClusterState clusterState = clusterService.state(); + final TrainedModelAssignmentMetadata assignment = TrainedModelAssignmentMetadata.fromState(clusterState); + TrainedModelAssignment trainedModelAssignment = assignment.getModelAssignment(request.getDeploymentId()); + if (trainedModelAssignment == null) { + listener.onFailure(new ResourceNotFoundException("assignment for model with id [{}] not found", request.getDeploymentId())); + return; + } + String[] nodes = trainedModelAssignment.getNodeRoutingTable() + .entrySet() + .stream() + .filter(entry -> entry.getValue().isRoutable()) + .map(Map.Entry::getKey) + .toArray(String[]::new); + + if (nodes.length == 0) { + listener.onResponse(new Response(true)); + return; + } + request.setNodes(nodes); + super.doExecute(task, request, listener); + } + + @Override + protected void taskOperation(Task actionTask, Request request, TrainedModelDeploymentTask task, ActionListener listener) { + task.clearCache(ActionListener.wrap(r -> listener.onResponse(new Response(true)), listener::onFailure)); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 700baef487fcb..300a5dc262c88 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -66,7 +66,7 @@ public class TrainedModelAssignmentNodeService implements ClusterStateListener { private static final String NODE_NO_LONGER_REFERENCED = "node no longer referenced in model routing table"; private static final String ASSIGNMENT_NO_LONGER_EXISTS = "model assignment no longer exists"; private static final TimeValue MODEL_LOADING_CHECK_INTERVAL = TimeValue.timeValueSeconds(1); - private static final TimeValue UPDATE_NUMBER_OF_ALLOCATIONS_TIMEOUT = TimeValue.timeValueSeconds(60); + private static final TimeValue CONTROL_MESSAGE_TIMEOUT = TimeValue.timeValueSeconds(60); private static final Logger logger = LogManager.getLogger(TrainedModelAssignmentNodeService.class); private final TrainedModelAssignmentService trainedModelAssignmentService; private final DeploymentManager deploymentManager; @@ -286,6 +286,10 @@ public Optional modelStats(TrainedModelDeploymentTask task) { return deploymentManager.getStats(task); } + public void clearCache(TrainedModelDeploymentTask task, ActionListener listener) { + deploymentManager.clearCache(task, CONTROL_MESSAGE_TIMEOUT, listener); + } + private TaskAwareRequest taskAwareRequest(StartTrainedModelDeploymentAction.TaskParams params) { final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = this; return new TaskAwareRequest() { @@ -419,7 +423,7 @@ private void updateNumberOfAllocations(TrainedModelAssignmentMetadata assignment deploymentManager.updateNumAllocations( task, assignment.getNodeRoutingTable().get(nodeId).getTargetAllocations(), - UPDATE_NUMBER_OF_ALLOCATIONS_TIMEOUT, + CONTROL_MESSAGE_TIMEOUT, ActionListener.wrap(threadSettings -> { logger.debug("[{}] Updated number of allocations to [{}]", assignment.getModelId(), threadSettings.numAllocations()); task.updateNumberOfAllocations(threadSettings.numAllocations()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ControlMessagePyTorchAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/AbstractControlMessagePyTorchAction.java similarity index 75% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ControlMessagePyTorchAction.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/AbstractControlMessagePyTorchAction.java index a6675736d61ff..85f1c694859c2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ControlMessagePyTorchAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/AbstractControlMessagePyTorchAction.java @@ -17,35 +17,37 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchResult; -import org.elasticsearch.xpack.ml.inference.pytorch.results.ThreadSettings; import java.io.IOException; import static org.elasticsearch.core.Strings.format; -class ControlMessagePyTorchAction extends AbstractPyTorchAction { +abstract class AbstractControlMessagePyTorchAction extends 
AbstractPyTorchAction { private static final Logger logger = LogManager.getLogger(InferencePyTorchAction.class); - private final int numAllocationThreads; - - private enum ControlMessageTypes { - AllocationThreads + enum ControlMessageTypes { + AllocationThreads, + ClearCache }; - ControlMessagePyTorchAction( + AbstractControlMessagePyTorchAction( String modelId, long requestId, - int numAllocationThreads, TimeValue timeout, DeploymentManager.ProcessContext processContext, ThreadPool threadPool, - ActionListener listener + ActionListener listener ) { super(modelId, requestId, timeout, processContext, threadPool, listener); - this.numAllocationThreads = numAllocationThreads; } + abstract int controlOrdinal(); + + abstract void writeMessage(XContentBuilder builder) throws IOException; + + abstract T getResult(PyTorchResult result); + @Override protected void doRun() throws Exception { if (isNotified()) { @@ -56,7 +58,7 @@ protected void doRun() throws Exception { final String requestIdStr = String.valueOf(getRequestId()); try { - var message = buildControlMessage(requestIdStr, numAllocationThreads); + var message = buildControlMessage(requestIdStr); getProcessContext().getResultProcessor() .registerRequest(requestIdStr, ActionListener.wrap(this::processResponse, this::onFailure)); @@ -70,24 +72,23 @@ protected void doRun() throws Exception { } } - public static BytesReference buildControlMessage(String requestId, int numAllocationThreads) throws IOException { + final BytesReference buildControlMessage(String requestId) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); builder.field("request_id", requestId); - builder.field("control", ControlMessageTypes.AllocationThreads.ordinal()); - builder.field("num_allocations", numAllocationThreads); + builder.field("control", controlOrdinal()); + writeMessage(builder); builder.endObject(); - // BytesReference.bytes closes the builder return BytesReference.bytes(builder); } - public void processResponse(PyTorchResult result) { + private void processResponse(PyTorchResult result) { if (result.isError()) { onFailure(result.errorResult().error()); return; } - onSuccess(result.threadSettings()); + onSuccess(getResult(result)); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ClearCacheControlMessagePytorchAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ClearCacheControlMessagePytorchAction.java new file mode 100644 index 0000000000000..8af99eb3b1c3d --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ClearCacheControlMessagePytorchAction.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.deployment; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchResult; + +public class ClearCacheControlMessagePytorchAction extends AbstractControlMessagePyTorchAction { + + ClearCacheControlMessagePytorchAction( + String modelId, + long requestId, + TimeValue timeout, + DeploymentManager.ProcessContext processContext, + ThreadPool threadPool, + ActionListener listener + ) { + super(modelId, requestId, timeout, processContext, threadPool, listener); + } + + @Override + int controlOrdinal() { + return ControlMessageTypes.ClearCache.ordinal(); + } + + @Override + void writeMessage(XContentBuilder builder) { + // Nothing is written + } + + @Override + Boolean getResult(PyTorchResult result) { + return true; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 4e6fe4fc0ca2e..362c421694d12 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -296,7 +297,7 @@ public void updateNumAllocations( } final long requestId = requestIdCounter.getAndIncrement(); - ControlMessagePyTorchAction controlMessageAction = new ControlMessagePyTorchAction( + ThreadSettingsControlMessagePytorchAction controlMessageAction = new ThreadSettingsControlMessagePytorchAction( task.getModelId(), requestId, numAllocationThreads, @@ -309,6 +310,26 @@ public void updateNumAllocations( executePyTorchAction(processContext, PriorityProcessWorkerExecutorService.RequestPriority.HIGHEST, controlMessageAction); } + public void clearCache(TrainedModelDeploymentTask task, TimeValue timeout, ActionListener listener) { + var processContext = getProcessContext(task, listener::onFailure); + if (processContext == null) { + // error reporting handled in the call to getProcessContext + return; + } + + final long requestId = requestIdCounter.getAndIncrement(); + ClearCacheControlMessagePytorchAction controlMessageAction = new ClearCacheControlMessagePytorchAction( + task.getModelId(), + requestId, + timeout, + processContext, + threadPool, + ActionListener.wrap(b -> listener.onResponse(AcknowledgedResponse.TRUE), listener::onFailure) + ); + + executePyTorchAction(processContext, PriorityProcessWorkerExecutorService.RequestPriority.HIGHEST, controlMessageAction); + } + public void executePyTorchAction( ProcessContext processContext, PriorityProcessWorkerExecutorService.RequestPriority priority, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchAction.java new file mode 100644 index 0000000000000..8c52017d4bf0a --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.deployment; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchResult; +import org.elasticsearch.xpack.ml.inference.pytorch.results.ThreadSettings; + +import java.io.IOException; + +public class ThreadSettingsControlMessagePytorchAction extends AbstractControlMessagePyTorchAction { + private final int numAllocationThreads; + + ThreadSettingsControlMessagePytorchAction( + String modelId, + long requestId, + int numAllocationThreads, + TimeValue timeout, + DeploymentManager.ProcessContext processContext, + ThreadPool threadPool, + ActionListener listener + ) { + super(modelId, requestId, timeout, processContext, threadPool, listener); + this.numAllocationThreads = numAllocationThreads; + } + + @Override + int controlOrdinal() { + return ControlMessageTypes.AllocationThreads.ordinal(); + } + + @Override + void writeMessage(XContentBuilder builder) throws IOException { + builder.field("num_allocations", numAllocationThreads); + } + + @Override + ThreadSettings getResult(PyTorchResult result) { + return result.threadSettings(); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java index caef67ddab889..9ea8c81754c3d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java @@ -167,6 +167,10 @@ public Optional modelStats() { return trainedModelAssignmentNodeService.modelStats(this); } + public void clearCache(ActionListener listener) { + trainedModelAssignmentNodeService.clearCache(this, listener); + } + public void setFailed(String reason) { failed = true; trainedModelAssignmentNodeService.failAssignment(this, reason); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestClearDeploymentCacheAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestClearDeploymentCacheAction.java new file mode 100644 index 0000000000000..d2b9f316409f2 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestClearDeploymentCacheAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.rest.inference; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; + +public class RestClearDeploymentCacheAction extends BaseRestHandler { + + @Override + public String getName() { + return "xpack_ml_clear_deployment_cache_action"; + } + + @Override + public List routes() { + return Collections.singletonList( + new Route(POST, BASE_PATH + "trained_models/{" + TrainedModelConfig.MODEL_ID.getPreferredName() + "}/deployment/cache/_clear") + ); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String modelId = restRequest.param(TrainedModelConfig.MODEL_ID.getPreferredName()); + return channel -> client.execute( + ClearDeploymentCacheAction.INSTANCE, + new ClearDeploymentCacheAction.Request(modelId), + new RestToXContentListener<>(channel) + ); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ControlMessagePyTorchActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchActionTests.java similarity index 84% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ControlMessagePyTorchActionTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchActionTests.java index de866528a21be..cbcc6e5959e22 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ControlMessagePyTorchActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchActionTests.java @@ -32,13 +32,22 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class ControlMessagePyTorchActionTests extends ESTestCase { - - private ThreadPool tp; +public class ThreadSettingsControlMessagePytorchActionTests extends ESTestCase { public void testBuildControlMessage() throws IOException { - var message = ControlMessagePyTorchAction.buildControlMessage("foo", 4); - + DeploymentManager.ProcessContext processContext = mock(DeploymentManager.ProcessContext.class); + ThreadPool tp = mock(ThreadPool.class); + @SuppressWarnings("unchecked") + ThreadSettingsControlMessagePytorchAction action = new ThreadSettingsControlMessagePytorchAction( + "model_id", + 1, + 4, + TimeValue.MINUS_ONE, + processContext, + tp, + ActionListener.NOOP + ); + var message = action.buildControlMessage("foo"); assertEquals("{\"request_id\":\"foo\",\"control\":0,\"num_allocations\":4}", message.utf8ToString()); } @@ -56,7 +65,7 @@ public void testRunNotCalledAfterNotified() { { ActionListener listener = mock(ActionListener.class); - ControlMessagePyTorchAction action = new ControlMessagePyTorchAction( + ThreadSettingsControlMessagePytorchAction action = new ThreadSettingsControlMessagePytorchAction( "test-model", 1, 1, @@ -75,7 +84,7 @@ public void 
testRunNotCalledAfterNotified() { } { ActionListener listener = mock(ActionListener.class); - ControlMessagePyTorchAction action = new ControlMessagePyTorchAction( + ThreadSettingsControlMessagePytorchAction action = new ThreadSettingsControlMessagePytorchAction( "test-model", 1, 1, @@ -114,7 +123,7 @@ public void testDoRun() throws IOException { ArgumentCaptor messageCapture = ArgumentCaptor.forClass(BytesReference.class); doNothing().when(pp).writeInferenceRequest(messageCapture.capture()); - ControlMessagePyTorchAction action = new ControlMessagePyTorchAction( + ThreadSettingsControlMessagePytorchAction action = new ThreadSettingsControlMessagePytorchAction( "test-model", 1, 1, diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 753f8c5cd716b..c6c7d538f8118 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -160,6 +160,7 @@ public class Constants { "cluster:admin/xpack/ml/job/update", "cluster:admin/xpack/ml/job/validate", "cluster:admin/xpack/ml/job/validate/detector", + "cluster:admin/xpack/ml/trained_models/deployment/clear_cache", "cluster:admin/xpack/ml/trained_models/deployment/start", "cluster:admin/xpack/ml/trained_models/deployment/stop", "cluster:admin/xpack/ml/trained_models/part/put", diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index 6d0348b1fba92..88e0539b1d6ca 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -130,6 +130,92 @@ setup: - match: { trained_model_stats.0.deployment_stats.nodes.0.inference_count: 3 } - match: { trained_model_stats.0.deployment_stats.nodes.0.inference_cache_hit_count: 1 } + - do: + ml.stop_trained_model_deployment: + model_id: test_model + - match: { stopped: true } +--- +"Test clear deployment cache": + - skip: + features: allowed_warnings + + - do: + ml.start_trained_model_deployment: + model_id: test_model + cache_size: 10kb + wait_for: started + - match: {assignment.assignment_state: started} + - match: {assignment.task_parameters.model_id: test_model} + - match: {assignment.task_parameters.cache_size: 10kb} + + - do: + allowed_warnings: + - '[POST /_ml/trained_models/{model_id}/deployment/_infer] is deprecated! Use [POST /_ml/trained_models/{model_id}/_infer] instead.' + ml.infer_trained_model: + model_id: "test_model" + body: > + { + "docs": [ + { "input": "words" } + ] + } + + - do: + allowed_warnings: + - '[POST /_ml/trained_models/{model_id}/deployment/_infer] is deprecated! Use [POST /_ml/trained_models/{model_id}/_infer] instead.' + ml.infer_trained_model: + model_id: "test_model" + body: > + { + "docs": [ + { "input": "are" } + ] + } + + - do: + allowed_warnings: + - '[POST /_ml/trained_models/{model_id}/deployment/_infer] is deprecated! Use [POST /_ml/trained_models/{model_id}/_infer] instead.' 
+ ml.infer_trained_model: + model_id: "test_model" + body: > + { + "docs": [ + { "input": "words" } + ] + } + + - do: + ml.get_trained_models_stats: + model_id: "test_model" + - match: { count: 1 } + - match: { trained_model_stats.0.deployment_stats.nodes.0.inference_count: 3 } + - match: { trained_model_stats.0.deployment_stats.nodes.0.inference_cache_hit_count: 1 } + + + - do: + ml.clear_trained_model_deployment_cache: + model_id: test_model + - match: { cleared: true } + + - do: + allowed_warnings: + - '[POST /_ml/trained_models/{model_id}/deployment/_infer] is deprecated! Use [POST /_ml/trained_models/{model_id}/_infer] instead.' + ml.infer_trained_model: + model_id: "test_model" + body: > + { + "docs": [ + { "input": "words" } + ] + } + + - do: + ml.get_trained_models_stats: + model_id: "test_model" + - match: { count: 1 } + - match: { trained_model_stats.0.deployment_stats.nodes.0.inference_count: 4 } + - match: { trained_model_stats.0.deployment_stats.nodes.0.inference_cache_hit_count: 1 } + - do: ml.stop_trained_model_deployment: model_id: test_model From d6e6980b0bf1c7e0c7e8c87dea6a897977161ce2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 4 Aug 2022 22:18:45 +0200 Subject: [PATCH 115/265] Remove unused datastream snapshot utility from Metadata (#88535) This method was introduced to fix datastream snapshots during concurrent index/datastream changes but was never actually used because we went with a different approach in the end. => remove it and its tests --- .../cluster/metadata/Metadata.java | 22 +--------- .../cluster/metadata/MetadataTests.java | 43 ------------------- 2 files changed, 1 insertion(+), 64 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index c18bdb6e97d1c..f25599bcd4915 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1128,26 +1128,6 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 return true; } - /** - * Reconciles the cluster state metadata taken at the end of a snapshot with the data streams and indices - * contained in the snapshot. Certain actions taken during a snapshot such as rolling over a data stream - * or deleting a backing index may result in situations where some reconciliation is required. 
- * - * @return Reconciled {@link Metadata} instance - */ - public static Metadata snapshot(Metadata metadata, List dataStreams, List indices) { - var builder = Metadata.builder(metadata); - for (var dsName : dataStreams) { - var dataStream = metadata.dataStreams().get(dsName); - if (dataStream == null) { - // should never occur since data streams cannot be deleted while they have snapshots underway - throw new IllegalArgumentException("unable to find data stream [" + dsName + "]"); - } - builder.put(dataStream.snapshot(indices)); - } - return builder.build(); - } - @Override public Diff diff(Metadata previousState) { return new MetadataDiff(previousState, this); @@ -1770,7 +1750,7 @@ public Builder removeCustom(String type) { } public Builder removeCustomIf(BiPredicate p) { - customs.removeAll(p::test); + customs.removeAll(p); return this; } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 7312926f5c669..1e3801fe1f485 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -48,7 +48,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -67,7 +66,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -1436,47 +1434,6 @@ public void testValidateDataStreamsForNullDataStreamMetadata() { } } - /** - * Tests for the implementation of data stream snapshot reconciliation are located in {@link DataStreamTests#testSnapshot()} - */ - public void testSnapshot() { - var postSnapshotMetadata = randomMetadata(randomIntBetween(1, 5)); - var dataStreamsToSnapshot = randomSubsetOf(new ArrayList<>(postSnapshotMetadata.dataStreams().keySet())); - List indicesInSnapshot = new ArrayList<>(); - for (var dsName : dataStreamsToSnapshot) { - // always include at least one backing index per data stream - DataStream ds = postSnapshotMetadata.dataStreams().get(dsName); - indicesInSnapshot.addAll( - randomSubsetOf(randomIntBetween(1, ds.getIndices().size()), ds.getIndices().stream().map(Index::getName).toList()) - ); - } - var reconciledMetadata = Metadata.snapshot(postSnapshotMetadata, dataStreamsToSnapshot, indicesInSnapshot); - assertThat(reconciledMetadata.dataStreams().size(), equalTo(postSnapshotMetadata.dataStreams().size())); - for (DataStream ds : reconciledMetadata.dataStreams().values()) { - assertThat(ds.getIndices().size(), greaterThanOrEqualTo(1)); - } - } - - public void testSnapshotWithMissingDataStream() { - var postSnapshotMetadata = randomMetadata(randomIntBetween(1, 5)); - var dataStreamsToSnapshot = randomSubsetOf(new ArrayList<>(postSnapshotMetadata.dataStreams().keySet())); - List indicesInSnapshot = new ArrayList<>(); - for (var dsName : dataStreamsToSnapshot) { - // always include at least one backing index per data stream - DataStream ds = postSnapshotMetadata.dataStreams().get(dsName); - indicesInSnapshot.addAll( - randomSubsetOf(randomIntBetween(1, ds.getIndices().size()), ds.getIndices().stream().map(Index::getName).toList()) - ); - } - String missingDataStream 
= randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - dataStreamsToSnapshot.add(missingDataStream); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> Metadata.snapshot(postSnapshotMetadata, dataStreamsToSnapshot, indicesInSnapshot) - ); - assertThat(e.getMessage(), containsString("unable to find data stream [" + missingDataStream + "]")); - } - public void testDataStreamAliases() { Metadata.Builder mdBuilder = Metadata.builder(); From e4a9967469835b2dd1d81cf23a172b9d4d02a4cc Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 4 Aug 2022 15:36:01 -0500 Subject: [PATCH 116/265] Handling the master stability health case where there has never been an elected master node (#89137) If a master-eligible node comes up and has never seen an elected master node (and assuming that a quorum requires more than one node), then it ought to report that the master stability health is red because it cannot form a quorum. --- .../CoordinationDiagnosticsServiceIT.java | 48 +++++++++++++++++++ .../CoordinationDiagnosticsService.java | 11 ++++- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java index 66346aae64dca..010f5a5f0300c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -11,19 +11,25 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.threadpool.Scheduler; import org.junit.Before; +import java.io.IOException; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -95,4 +101,46 @@ public void testBlockClusterStateProcessingOnOneNode() throws Exception { disruption.stopDisrupting(); } + + public void testNoMasterElected() throws Exception { + /* + * This test starts up a 3-node cluster where all nodes are master eligible. It then shuts down two of the nodes and restarts one + * of them. We then assert that diagnoseMasterStability returns a red status because a quorum can't be formed. This is an edge + * case because since there is no elected master, clusterChanged() is never called (which is what usually kicks off the polling + * that drives the quorum check). 
+ */ + final List masterNodeNames = internalCluster().startMasterOnlyNodes( + 3, + Settings.builder().put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() + ); + ensureStableCluster(3); + String randomMasterNodeName = internalCluster().getRandomNodeName(); + masterNodeNames.stream().filter(nodeName -> nodeName.equals(randomMasterNodeName) == false).forEach(nodeName -> { + try { + internalCluster().stopNode(nodeName); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + internalCluster().restartNode(randomMasterNodeName, new InternalTestCluster.RestartCallback() { + public boolean validateClusterForming() { + return false; + } + }); + + try { + CoordinationDiagnosticsService diagnosticsOnMasterEligibleNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + randomMasterNodeName + ); + diagnosticsOnMasterEligibleNode.remoteRequestInitialDelay = TimeValue.ZERO; + CoordinationDiagnosticsService.CoordinationDiagnosticsResult result = diagnosticsOnMasterEligibleNode.diagnoseMasterStability( + true + ); + assertThat(result.status(), equalTo(CoordinationDiagnosticsService.CoordinationDiagnosticsStatus.RED)); + assertThat(result.summary(), containsString("the master eligible nodes are unable to form a quorum")); + } finally { + internalCluster().stopNode(randomMasterNodeName); // This is needed for the test to clean itself up happily + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index 3987550436ff0..b5f8afaea09ab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -375,7 +375,9 @@ static CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecentlyAndWeAre * We want to make sure that the same elements are in this set every time we loop through it. We don't care if values are added * while we're copying it, which is why this is not synchronized. We only care that once we have a copy it is not changed. */ - final Map nodeToClusterFormationResponses = Map.copyOf(clusterFormationResponses); + final Map nodeToClusterFormationResponses = clusterFormationResponses == null + ? Map.of() + : Map.copyOf(clusterFormationResponses); for (Map.Entry entry : nodeToClusterFormationResponses.entrySet()) { Exception remoteException = entry.getValue().exception(); if (remoteException != null) { @@ -400,6 +402,13 @@ static CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecentlyAndWeAre nodeToClusterFormationResponses.entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().clusterFormationState())); + if (nodeClusterFormationStateMap.isEmpty()) { + /* + * The most likely reason we are here is that polling for cluster formation info never began because there has been no cluster + * changed event because there has never been a master node. So we just use the local cluster formation state. 
+ */ + nodeClusterFormationStateMap = Map.of(coordinator.getLocalNode(), coordinator.getClusterFormationState()); + } Map nodeIdToClusterFormationDescription = nodeClusterFormationStateMap.entrySet() .stream() .collect(Collectors.toMap(entry -> entry.getKey().getId(), entry -> entry.getValue().getDescription())); From e53767835a08a4c5264f9113e684a9977d1fb374 Mon Sep 17 00:00:00 2001 From: wjwei Date: Fri, 5 Aug 2022 17:56:19 +0800 Subject: [PATCH 117/265] Fix object equals for SqlQueryRequest's binaryCommunication (#87887) Co-authored-by: owenniceliu --- docs/changelog/87887.yaml | 6 ++++++ .../org/elasticsearch/xpack/sql/action/SqlQueryRequest.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/87887.yaml diff --git a/docs/changelog/87887.yaml b/docs/changelog/87887.yaml new file mode 100644 index 0000000000000..c1f1f9ecda8d5 --- /dev/null +++ b/docs/changelog/87887.yaml @@ -0,0 +1,6 @@ +pr: 87887 +summary: fix object equals +area: SQL +type: bug +issues: + - [] diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java index c89d1f6d3a03d..ee2902992deb1 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java @@ -325,7 +325,7 @@ public boolean equals(Object obj) { return super.equals(obj) && fieldMultiValueLeniency == ((SqlQueryRequest) obj).fieldMultiValueLeniency && indexIncludeFrozen == ((SqlQueryRequest) obj).indexIncludeFrozen - && binaryCommunication == ((SqlQueryRequest) obj).binaryCommunication + && Objects.equals(binaryCommunication, ((SqlQueryRequest) obj).binaryCommunication) && keepOnCompletion == ((SqlQueryRequest) obj).keepOnCompletion && allowPartialSearchResults == ((SqlQueryRequest) obj).allowPartialSearchResults && Objects.equals(cursor, ((SqlQueryRequest) obj).cursor) From 84e63080f4f4e79fe06aa4be1cbd7f6f6e765139 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Fri, 5 Aug 2022 14:03:03 +0300 Subject: [PATCH 118/265] fix the changelog yaml file for pr 87887 (#89146) --- docs/changelog/87887.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/changelog/87887.yaml b/docs/changelog/87887.yaml index c1f1f9ecda8d5..43a077c47b3c4 100644 --- a/docs/changelog/87887.yaml +++ b/docs/changelog/87887.yaml @@ -2,5 +2,4 @@ pr: 87887 summary: fix object equals area: SQL type: bug -issues: - - [] +issues: [] From 2c79925cf273f899749f3d56709bf8ee7c9ca631 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 5 Aug 2022 08:38:35 -0700 Subject: [PATCH 119/265] Add missing dependency verification checksums (#89139) --- .../src/main/groovy/elasticsearch.fips.gradle | 11 +++++++++-- gradle/verification-metadata.xml | 16 ++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index 706334479ec28..a020262b5b852 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -6,10 +6,11 @@ * Side Public License, v 1. 
*/ + import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.testclusters.TestDistribution import org.elasticsearch.gradle.testclusters.TestClustersAware +import org.elasticsearch.gradle.testclusters.TestDistribution // Common config when running with a FIPS-140 runtime JVM if (BuildParams.inFipsJvm) { @@ -31,7 +32,13 @@ if (BuildParams.inFipsJvm) { copy 'fips_java.policy' copy 'cacerts.bcfks' } - def extraFipsJarsConfiguration = configurations.detachedConfiguration(bcFips, bcTlsFips) + + def extraFipsJarsConfiguration = configurations.create("fipsImplementation") { + withDependencies { + add(bcFips) + add(bcTlsFips) + } + } project.afterEvaluate { // ensure that bouncycastle is on classpath for the all of test types, must happen in evaluateAfter since the rest tests explicitly diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index d51d3019be1ae..366c503becd44 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -37,6 +37,9 @@ + + + @@ -1405,6 +1408,9 @@ + + + @@ -2753,6 +2759,11 @@ + + + + + @@ -3616,5 +3627,10 @@ + + + + + From a67920e1dcc3c2f11e6f935f2a87e8528d35331f Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Sun, 7 Aug 2022 11:16:49 +0100 Subject: [PATCH 120/265] Add file delete retry to testcluster ElasticsearchNode (#89095) Add retry logic for cleanup / deletion in testcluster's ElasticsearchNode, to tolerate the asynchronous nature of deletions on the Windows file-system. --- .../testclusters/ElasticsearchNode.java | 91 +++++++++++++++++-- 1 file changed, 85 insertions(+), 6 deletions(-) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index fcc4640ae43ca..bca06c302d2a5 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -62,6 +62,8 @@ import java.io.IOException; import java.io.InputStream; import java.io.LineNumberReader; +import java.io.PrintWriter; +import java.io.StringWriter; import java.io.UncheckedIOException; import java.net.URL; import java.nio.charset.StandardCharsets; @@ -489,6 +491,13 @@ public void freeze() { configurationFrozen.set(true); } + private static String throwableToString(Throwable t) { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + t.printStackTrace(pw); + return sw.toString(); + } + @Override public synchronized void start() { LOGGER.info("Starting `{}`", this); @@ -505,11 +514,9 @@ public synchronized void start() { // make sure we always start fresh if (Files.exists(workingDir)) { if (preserveDataDir) { - Files.list(workingDir) - .filter(path -> path.equals(confPathData) == false) - .forEach(path -> fileSystemOperations.delete(d -> d.delete(path))); + Files.list(workingDir).filter(path -> path.equals(confPathData) == false).forEach(this::uncheckedDeleteWithRetry); } else { - fileSystemOperations.delete(d -> d.delete(workingDir)); + deleteWithRetry(workingDir); } } isWorkingDirConfigured = true; @@ -517,7 +524,13 @@ public synchronized void start() { setupNodeDistribution(getExtractedDistributionDir()); createWorkingDir(); } catch (IOException e) { - throw new UncheckedIOException("Failed to create working 
directory for " + this, e); + String msg = "Failed to create working directory for " + this + ", with: " + e + throwableToString(e); + logToProcessStdout(msg); + throw new UncheckedIOException(msg, e); + } catch (org.gradle.api.UncheckedIOException e) { + String msg = "Failed to create working directory for " + this + ", with: " + e + throwableToString(e); + logToProcessStdout(msg); + throw e; } copyExtraJars(); @@ -1192,9 +1205,75 @@ private void waitForProcessToExit(ProcessHandle processHandle) { } } + private static final int RETRY_DELETE_MILLIS = OS.current() == OS.WINDOWS ? 500 : 0; + private static final int MAX_RETRY_DELETE_TIMES = OS.current() == OS.WINDOWS ? 15 : 0; + + /** + * Deletes a path, retrying if necessary. + * + * @param path the path to delete + * @throws IOException + * if an I/O error occurs + */ + void deleteWithRetry(Path path) throws IOException { + try { + deleteWithRetry0(path); + } catch (InterruptedException x) { + throw new IOException("Interrupted while deleting.", x); + } + } + + /** Unchecked variant of deleteWithRetry. */ + void uncheckedDeleteWithRetry(Path path) { + try { + deleteWithRetry0(path); + } catch (IOException e) { + throw new UncheckedIOException(e); + } catch (InterruptedException x) { + throw new UncheckedIOException("Interrupted while deleting.", new IOException()); + } + } + + // The exception handling here is loathsome, but necessary! + private void deleteWithRetry0(Path path) throws IOException, InterruptedException { + int times = 0; + IOException ioe = null; + while (true) { + try { + fileSystemOperations.delete(d -> d.delete(path)); + times++; + // Checks for absence of the file. Semantics of Files.exists() is not the same. + while (Files.notExists(path) == false) { + if (times > MAX_RETRY_DELETE_TIMES) { + throw new IOException("File still exists after " + times + " waits."); + } + Thread.sleep(RETRY_DELETE_MILLIS); + // retry + fileSystemOperations.delete(d -> d.delete(path)); + times++; + } + break; + } catch (NoSuchFileException ignore) { + // already deleted, ignore + break; + } catch (org.gradle.api.UncheckedIOException | IOException x) { + if (x.getCause() instanceof NoSuchFileException) { + // already deleted, ignore + break; + } + // Backoff/retry in case another process is accessing the file + times++; + if (ioe == null) ioe = new IOException(); + ioe.addSuppressed(x); + if (times > MAX_RETRY_DELETE_TIMES) throw ioe; + Thread.sleep(RETRY_DELETE_MILLIS); + } + } + } + private void createWorkingDir() throws IOException { // Start configuration from scratch in case of a restart - fileSystemOperations.delete(d -> d.delete(configFile.getParent())); + deleteWithRetry(configFile.getParent()); Files.createDirectories(configFile.getParent()); Files.createDirectories(confPathRepo); Files.createDirectories(confPathData); From d2fe335a6c7f1e3a45e6240e60da0a8b30907162 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 8 Aug 2022 08:24:35 +0200 Subject: [PATCH 121/265] Fix testFollowIndex (#89116) If queue is limited then update settings might fail to add a listener to wait for a connection, however remote should be connected eventually by a concurrent task. 
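To make the race concrete, here is a simplified sketch of a connection-listener queue bounded at one entry. The names are hypothetical and this is not the real RemoteConnectionStrategy code; it only illustrates why a settings update that fails to enqueue its listener can return before the remote handshake has completed, while a concurrent connect attempt finishes the job later:

    import java.util.ArrayDeque;
    import java.util.Queue;
    import java.util.function.Consumer;

    // Illustrative sketch only: hypothetical names, not the production implementation.
    class BoundedConnectListenerQueueSketch {
        private final Queue<Consumer<Void>> pendingListeners = new ArrayDeque<>();
        private final int maxPendingListeners = 1; // a queue limit of one, as in the flaky case

        // Returns false when the queue is full: the caller stops waiting even though
        // a connection attempt is already in flight on another thread.
        synchronized boolean tryAwaitConnection(Consumer<Void> onConnected) {
            if (pendingListeners.size() >= maxPendingListeners) {
                return false;
            }
            pendingListeners.add(onConnected); // completed once the handshake succeeds
            return true;
        }

        // Called by the concurrent connect attempt once the remote handshake succeeds.
        synchronized void onHandshakeCompleted() {
            Consumer<Void> listener;
            while ((listener = pendingListeners.poll()) != null) {
                listener.accept(null);
            }
        }
    }

This is why the test change below polls for connectivity (assertBusy) when the configured limit is one, instead of asserting that the remote is connected immediately after the settings update returns.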
--- .../xpack/ccr/RestartIndexFollowingIT.java | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java index 896a072a95778..e4900f98bb1b8 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.RemoteConnectionInfo; import org.elasticsearch.transport.RemoteConnectionStrategy; import org.elasticsearch.transport.TransportService; @@ -33,10 +32,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; -@TestLogging( - value = "org.elasticsearch.transport.RemoteClusterService:DEBUG,org.elasticsearch.transport.SniffConnectionStrategy:TRACE", - reason = "https://github.com/elastic/elasticsearch/issues/81302" -) public class RestartIndexFollowingIT extends CcrIntegTestCase { @Override @@ -121,13 +116,31 @@ public void testFollowIndex() throws Exception { } private void setupRemoteCluster() throws Exception { + var remoteMaxPendingConnectionListeners = getRemoteMaxPendingConnectionListeners(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); String address = getLeaderCluster().getAnyMasterNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", address)); assertAcked(followerClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - List infos = followerClient().execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest()).get().getInfos(); - assertThat(infos.size(), equalTo(1)); - assertTrue(infos.get(0).isConnected()); + + if (remoteMaxPendingConnectionListeners == 1) { + // if queue is limited then update settings might fail to add a listener to wait for a connection + // however remote should be connected eventually by a concurrent task + assertBusy(this::isRemoteConnected); + } else { + assertTrue(isRemoteConnected()); + } + } + + private boolean isRemoteConnected() throws Exception { + var infos = followerClient().execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest()).get().getInfos(); + return infos.size() == 1 && infos.get(0).isConnected(); + } + + private Integer getRemoteMaxPendingConnectionListeners() { + var response = followerClient().admin().cluster().prepareNodesInfo("_local").clear().setSettings(true).get(); + var settings = response.getNodes().get(0).getSettings(); + return RemoteConnectionStrategy.REMOTE_MAX_PENDING_CONNECTION_LISTENERS.get(settings); } private void cleanRemoteCluster() throws Exception { From 63f1ab5ab227c0fbeeeb3e90c77653fb069f9760 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 8 Aug 2022 08:25:13 +0200 Subject: [PATCH 122/265] Fix flaky StableMasterDisruptionIT#testNoQuorum (#89064) Above test may fail if the master node replies within 1s. This happens on some of our slower CI workers. 
Whitelisting additional error message. --- .../discovery/StableMasterDisruptionIT.java | 57 +++++++++---------- .../CoordinationDiagnosticsService.java | 4 +- 2 files changed, 28 insertions(+), 33 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 7e92e73187bd8..d83712dde30da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -46,6 +46,7 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Matcher; import org.junit.Before; import java.io.IOException; @@ -63,6 +64,7 @@ import java.util.concurrent.TimeUnit; import static java.util.Collections.singleton; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -129,19 +131,15 @@ public void testFailWithMinimumMasterNodesConfigured() throws Exception { } private void assertGreenMasterStability(Client client) throws Exception { - assertMasterStability(client, HealthStatus.GREEN, "The cluster has a stable master node"); + assertMasterStability(client, HealthStatus.GREEN, containsString("The cluster has a stable master node")); } - private void assertMasterStability(Client client, HealthStatus expectedStatus, String expectedSummarySubstring) throws Exception { + private void assertMasterStability(Client client, HealthStatus expectedStatus, Matcher expectedMatcher) throws Exception { assertBusy(() -> { GetHealthAction.Response healthResponse = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true)).get(); String debugInformation = xContentToString(healthResponse); assertThat(debugInformation, healthResponse.getStatus(), equalTo(expectedStatus)); - assertThat( - debugInformation, - healthResponse.findIndicator("master_is_stable").symptom(), - containsString(expectedSummarySubstring) - ); + assertThat(debugInformation, healthResponse.findIndicator("master_is_stable").symptom(), expectedMatcher); }); } @@ -413,7 +411,7 @@ public void testRepeatedMasterChanges(String expectedMasterStabilitySymptomSubst * other node(s) were master, it only saw itself as master. So we want to check with another node. 
*/ Client client = internalCluster().client(randomFrom(nodeNamesExceptFirstMaster)); - assertMasterStability(client, HealthStatus.YELLOW, expectedMasterStabilitySymptomSubstring); + assertMasterStability(client, HealthStatus.YELLOW, containsString(expectedMasterStabilitySymptomSubstring)); } public void testRepeatedNullMasterRecognizedAsGreenIfMasterDoesNotKnowItIsUnstable() throws Exception { @@ -506,7 +504,7 @@ public void testNoMasterEligibleNodes() throws Exception { assertMasterStability( internalCluster().client(randomFrom(dataNodes)), HealthStatus.RED, - "No master eligible nodes found in the cluster" + containsString("No master eligible nodes found in the cluster") ); for (String dataNode : dataNodes) { internalCluster().stopNode(dataNode); @@ -563,7 +561,7 @@ public void testCannotJoinLeader() throws Exception { assertMasterStability( internalCluster().client(randomFrom(dataNodes)), HealthStatus.RED, - "has been elected master, but the node being queried" + containsString("has been elected master, but the node being queried") ); } @@ -574,26 +572,15 @@ public void testNoQuorum() throws Exception { * low on the data nodes, so when we run the master stability check on each of the master nodes, it will see that there has been no * master recently and because there is no quorum, so it returns a RED status. */ - final List masterNodes = internalCluster().startMasterOnlyNodes( - 3, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) - .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO) - .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) - .build() - ); - final List dataNodes = internalCluster().startDataOnlyNodes( - 2, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) - .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO) - .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) - .build() - ); + var settings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) + .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO) + .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) + .build(); + var masterNodes = internalCluster().startMasterOnlyNodes(3, settings); + var dataNodes = internalCluster().startDataOnlyNodes(2, settings); ensureStableCluster(5); String firstMasterNode = internalCluster().getMasterName(); List nonActiveMasterNodes = masterNodes.stream().filter(nodeName -> firstMasterNode.equals(nodeName) == false).toList(); @@ -610,7 +597,15 @@ public void testNoQuorum() throws Exception { networkDisconnect.startDisrupting(); internalCluster().stopNode(firstMasterNode); for (String nonActiveMasterNode : nonActiveMasterNodes) { - assertMasterStability(internalCluster().client(nonActiveMasterNode), HealthStatus.RED, "unable to form a quorum"); + assertMasterStability( + 
internalCluster().client(nonActiveMasterNode), + HealthStatus.RED, + anyOf( + containsString("unable to form a quorum"), + containsString("No master node observed in the last 1s, and the cause has not been determined.") + // later happens if master node has not replied within 1s + ) + ); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index b5f8afaea09ab..d89eb4b55a8f7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -385,7 +385,7 @@ static CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecentlyAndWeAre CoordinationDiagnosticsStatus.RED, String.format( Locale.ROOT, - "No master node observed in the last %s, and an exception occurred while reaching out " + "to %s for diagnosis", + "No master node observed in the last %s, and an exception occurred while reaching out to %s for diagnosis", nodeHasMasterLookupTimeframe, entry.getKey().getName() ), @@ -517,7 +517,7 @@ static boolean anyNodeInClusterReportsQuorumProblems( .map( entry -> String.format( Locale.ROOT, - "%s reports that a quorum " + "cannot be formed: [%s]", + "%s reports that a quorum cannot be formed: [%s]", entry.getKey().getName(), entry.getValue() ) From 7c38041b9e91bf22503627eb500f521ef570ac71 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 8 Aug 2022 09:13:34 +0200 Subject: [PATCH 123/265] Make it explicit that test expects no rebalancing. (#89040) This is required in case new shards allocator might be more proactive with rebalancing. --- .../elasticsearch/gateway/ReplicaShardAllocatorIT.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 96b985e0286f9..fefb93e537975 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -463,7 +463,12 @@ public void testPeerRecoveryForClosedIndices() throws Exception { client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "primaries").build()) + .setPersistentSettings( + Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.PRIMARIES) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.PRIMARIES) + .build() + ) ); internalCluster().fullRestart(); ensureYellow(indexName); @@ -475,7 +480,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception { client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable").build()) + .setPersistentSettings(Settings.builder().putNull(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()).build()) ); ensureGreen(indexName); assertNoOpRecoveries(indexName); From c4bd4d3cbf5bc0f837bfc82f1ebbde98ed08a01c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Montalv=C3=A3o=20Marques?= <9379664+GonMMarques@users.noreply.github.com> Date: Mon, 8 Aug 2022 08:59:47 +0100 Subject: [PATCH 124/265] 
Fix typo in geo-distance-query doc (#89148) --- docs/reference/query-dsl/geo-distance-query.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index 5c1b0a1ecfc3f..5fc39a415acab 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -11,7 +11,7 @@ a given distance of a geopoint. [[geo-distance-query-ex]] ==== Example -Assume the following the following documents are indexed: +Assume the following documents are indexed: [source,console] -------------------------------------------------- From 259d2e0b1d8ed3946153ea9aeefb797beca9ff6b Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Mon, 8 Aug 2022 09:28:44 +0100 Subject: [PATCH 125/265] Fix typo in TRACING.md --- TRACING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TRACING.md b/TRACING.md index b998850d43dc2..181d18e7ff760 100644 --- a/TRACING.md +++ b/TRACING.md @@ -45,7 +45,7 @@ For context, the APM agent pulls configuration from [multiple sources][agent-config], with a hierarchy that means, for example, that options set in the config file cannot be overridden via system properties. -Now, in order to send tracing data to the APM server, ES needs to configured with +Now, in order to send tracing data to the APM server, ES needs to be configured with either a `secret_key` or an `api_key`. We could configure these in the agent via system properties, but then their values would be available to any Java code in Elasticsearch that can read system properties. From c81f907ad8e625dddb415e2b3cac2e778ffe95a0 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 8 Aug 2022 10:06:32 +0100 Subject: [PATCH 126/265] Refine size-your-shards wording (#89081) Clarify that the limits in the docs are absolute maxima that will avoid things just breaking but won't necessarily give great performance. --- .../how-to/size-your-shards.asciidoc | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index c06986d405f9b..a1236a11525f9 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -175,17 +175,25 @@ index prirep shard store [discrete] [[shard-count-recommendation]] -==== Aim for 3000 indices or fewer per GB of heap memory on each master node +==== Master-eligible nodes should have at least 1GB of heap per 3000 indices The number of indices a master node can manage is proportional to its heap size. The exact amount of heap memory needed for each index depends on various factors such as the size of the mapping and the number of shards per index. -As a general rule of thumb, you should aim for 3000 indices or fewer per GB of -heap on master nodes. For example, if your cluster contains 12000 indices then -each dedicated master node should have at least 4GB of heap. For non-dedicated -master nodes, the same rule holds and should be added to the heap requirements -of the other roles of each node. +As a general rule of thumb, you should have fewer than 3000 indices per GB of +heap on master nodes. For example, if your cluster has dedicated master nodes +with 4GB of heap each then you should have fewer than 12000 indices. 
If your +master nodes are not dedicated master nodes then the same sizing guidance +applies: you should reserve at least 1GB of heap on each master-eligible node +for every 3000 indices in your cluster. + +Note that this rule defines the absolute maximum number of indices that a +master node can manage, but does not guarantee the performance of searches or +indexing involving this many indices. You must also ensure that your data nodes +have adequate resources for your workload and that your overall sharding +strategy meets all your performance requirements. See also +<> and <>. To check the configured size of each node's heap, use the <>. @@ -207,7 +215,7 @@ GET _cat/shards?v=true [discrete] [[field-count-recommendation]] -==== Allow 1kB of heap per field per index on data nodes, plus overheads +==== Data nodes should have at least 1kB of heap per field per index, plus overheads The exact resource usage of each mapped field depends on its type, but a rule of thumb is to allow for approximately 1kB of heap overhead per mapped field @@ -222,6 +230,13 @@ For example, if a data node holds shards from 1000 indices, each containing of heap for the fields and another 0.5GB of heap for its workload and other overheads, and therefore this node will need a heap size of at least 4.5GB. +Note that this rule defines the absolute maximum number of indices that a data +node can manage, but does not guarantee the performance of searches or indexing +involving this many indices. You must also ensure that your data nodes have +adequate resources for your workload and that your overall sharding strategy +meets all your performance requirements. See also <> +and <>. + [discrete] [[avoid-node-hotspots]] ==== Avoid node hotspots From 36c4a17087ff49173ddc11bbc4278311459f36ad Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Mon, 8 Aug 2022 12:12:24 +0200 Subject: [PATCH 127/265] Do not generate empty buckets for the date histogram (#89070) If the date histogram interval is large and the 'fixed_interval' parameter is very small we might end up with a large number of buckets in the resulting histogram, in case we also generate empty buckets. As a result of this we might generate too many buckets (max date - min date) / fixed_interval > 65536 (roughly).. Here we set minDocCount to 1 so to avoid generation of empty buckets. In the test the maximum value for 'docCount' is 9000 which means, in the worst case we generate 9000 documents, each belonging to a different bucket. In this case we would have 9000 buckets maximum which is well below the default maximum number of buckets allowed by default. 
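To make the arithmetic concrete, here is a rough worked example (the numbers are illustrative and are not taken from the test): with documents spread over a 24-hour window and a 'fixed_interval' of 1s, a date histogram that also emits empty buckets would produce about

    24 * 60 * 60 / 1 = 86,400 buckets

which is already above the roughly 65536-bucket limit, while with 'minDocCount(1)' the number of buckets is bounded by the number of distinct document timestamps, at most 9000 in this test.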
--- .../xpack/rollup/v2/RollupActionSingleNodeTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java index 721c60876b730..3bffb800a6801 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java @@ -818,7 +818,7 @@ private AggregationBuilder buildAggregations( .size(10_000); final DateHistogramAggregationBuilder dateHistogramAggregation = new DateHistogramAggregationBuilder("timestamp").field( config.getTimestampField() - ).fixedInterval(config.getInterval()); + ).fixedInterval(config.getInterval()).minDocCount(1); if (config.getTimeZone() != null) { dateHistogramAggregation.timeZone(ZoneId.of(config.getTimeZone())); } From ee33383156f36d47725ef8b31aae5d46e4bf5da0 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 8 Aug 2022 08:29:09 -0500 Subject: [PATCH 128/265] Polling for cluster diagnostics information (#89014) This commit causes non-master-eligible nodes to poll a random master-eligible node every 10 seconds whenever the elected master goes null for diagnostic information in support of the health API's master stability check. --- docs/changelog/89014.yaml | 5 + .../CoordinationDiagnosticsServiceIT.java | 126 ++++++++ .../CoordinationDiagnosticsService.java | 298 +++++++++++++++--- .../CoordinationDiagnosticsServiceTests.java | 85 +++++ .../AbstractCoordinatorTestCase.java | 21 +- 5 files changed, 485 insertions(+), 50 deletions(-) create mode 100644 docs/changelog/89014.yaml diff --git a/docs/changelog/89014.yaml b/docs/changelog/89014.yaml new file mode 100644 index 0000000000000..1e617fbf54121 --- /dev/null +++ b/docs/changelog/89014.yaml @@ -0,0 +1,5 @@ +pr: 89014 +summary: Polling for cluster diagnostics information +area: Health +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java index 010f5a5f0300c..cdd84a829b44f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -14,17 +14,25 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.Before; import java.io.IOException; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import 
java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; @@ -41,6 +49,11 @@ private void setBootstrapMasterNodeIndex() { internalCluster().setBootstrapMasterNodeIndex(0); } + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockTransportService.TestPlugin.class); + } + public void testBlockClusterStateProcessingOnOneNode() throws Exception { /* * This test picks a node that is not elected master, and then blocks cluster state processing on it. The reason is so that we @@ -102,6 +115,119 @@ public void testBlockClusterStateProcessingOnOneNode() throws Exception { disruption.stopDisrupting(); } + public void testBeginPollingRemoteStableMasterHealthIndicatorService() throws Exception { + /* + * This test picks a node that is not elected master, and then blocks cluster state processing on it. The reason is so that we + * can call CoordinationDiagnosticsService#beginPollingRemoteMasterStabilityDiagnostic without a cluster changed event + * resulting in the values we pass in being overwritten. + */ + final List nodeNames = internalCluster().startNodes(3); + ensureStableCluster(3); + + final String master = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(master)); + String blockedNode = nodeNames.stream().filter(n -> n.equals(master) == false).findAny().get(); + assertNotNull(blockedNode); + + DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, master).state().nodes(); + Set nodesWithoutBlockedNode = discoveryNodes.getNodes() + .values() + .stream() + .filter(n -> n.getName().equals(blockedNode) == false) + .collect(Collectors.toSet()); + + BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(blockedNode, random()); + internalCluster().setDisruptionScheme(disruption); + // stop processing cluster state changes + disruption.startDisrupting(); + + CoordinationDiagnosticsService diagnosticsOnBlockedNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + blockedNode + ); + AtomicReference result = new AtomicReference<>(); + AtomicReference cancellable = new AtomicReference<>(); + diagnosticsOnBlockedNode.remoteCoordinationDiagnosisResult = result; + diagnosticsOnBlockedNode.remoteCoordinationDiagnosisTask = cancellable; + + diagnosticsOnBlockedNode.remoteRequestInitialDelay = TimeValue.ZERO; + diagnosticsOnBlockedNode.beginPollingRemoteMasterStabilityDiagnostic(result::set, cancellable); + + // while the node is blocked from processing cluster state changes it should reach out to the other 2 + // master eligible nodes and get a successful response + assertBusy(() -> { + assertNotNull(result.get()); + assertNotNull(cancellable.get()); + assertNotNull(result.get().result()); + assertNull(result.get().remoteException()); + }); + + disruption.stopDisrupting(); + } + + public void testNoQuorumSeenFromNonMasterNodes() throws Exception { + /* + * In this test we have three master-eligible nodes. We make it so that the two non-active ones cannot communicate, and then we + * stop the active master node. Now there is no quorum so a new master cannot be elected. We set the master lookup threshold very + * low on the data nodes, so when we run the master stability check on each of the master nodes, it will see that there has been no + * master recently because there is no quorum, so it returns a RED status. 
In this test we then check the value of + * remoteCoordinationDiagnosisResult on each of the non-master-eligible nodes to make sure that they have reached out to one of + * the master-eligible nodes to get the expected result. + */ + final List masterNodes = internalCluster().startMasterOnlyNodes( + 3, + Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) + .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO) + .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) + .build() + ); + final List dataNodes = internalCluster().startDataOnlyNodes( + 2, + Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) + .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO) + .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) + .build() + ); + internalCluster().getInstances(CoordinationDiagnosticsService.class) + .forEach(coordinationDiagnosticsService -> coordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO); + ensureStableCluster(5); + String firstMasterNode = internalCluster().getMasterName(); + List nonActiveMasterNodes = masterNodes.stream().filter(nodeName -> firstMasterNode.equals(nodeName) == false).toList(); + NetworkDisruption networkDisconnect = new NetworkDisruption( + new NetworkDisruption.TwoPartitions( + Set.of(nonActiveMasterNodes.get(0), dataNodes.get(0)), + Set.of(nonActiveMasterNodes.get(1), dataNodes.get(1)) + ), + NetworkDisruption.UNRESPONSIVE + ); + internalCluster().clearDisruptionScheme(); + setDisruptionScheme(networkDisconnect); + networkDisconnect.startDisrupting(); + internalCluster().stopNode(firstMasterNode); + + assertBusy(() -> { + dataNodes.forEach(dataNode -> { + CoordinationDiagnosticsService diagnosticsOnBlockedNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + dataNode + ); + assertNotNull(diagnosticsOnBlockedNode.remoteCoordinationDiagnosisResult); + assertNotNull(diagnosticsOnBlockedNode.remoteCoordinationDiagnosisResult.get()); + CoordinationDiagnosticsService.CoordinationDiagnosticsResult result = + diagnosticsOnBlockedNode.remoteCoordinationDiagnosisResult.get().result(); + assertNotNull(result); + assertThat(result.status(), equalTo(CoordinationDiagnosticsService.CoordinationDiagnosticsStatus.RED)); + assertThat(result.summary(), containsString("unable to form a quorum")); + }); + }); + } + public void testNoMasterElected() throws Exception { /* * This test starts up a 3-node cluster where all nodes are master eligible. 
It then shuts down two of the nodes and restarts one diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index d89eb4b55a8f7..77e585a0178b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -13,8 +13,12 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.StepListener; import org.elasticsearch.action.admin.cluster.coordination.ClusterFormationInfoAction; +import org.elasticsearch.action.admin.cluster.coordination.CoordinationDiagnosticsAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -49,7 +53,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -99,6 +105,18 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { // Non-private for testing volatile ConcurrentMap clusterFormationResponses = null; + /* + * This is a reference to the task that is periodically reaching out to a master eligible node to get its CoordinationDiagnosticsResult + * for diagnosis. It is null when no polling is occurring. + * The field is accessed (reads/writes) from multiple threads, and is also reassigned on multiple threads. + */ + volatile AtomicReference remoteCoordinationDiagnosisTask = null; + /* + * This field holds the result of the task in the remoteCoordinationDiagnosisTask field above. The field is accessed + * (reads/writes) from multiple threads, but is only ever reassigned on a single thread (the cluster change event thread). + */ + volatile AtomicReference remoteCoordinationDiagnosisResult = null; + /** * This is the amount of time that we wait before scheduling a remote request to gather diagnostic information. It is not * user-configurable, but is non-final so that integration tests don't have to waste 10 seconds. @@ -636,6 +654,13 @@ public void clusterChanged(ClusterChangedEvent event) { } else { cancelPollingClusterFormationInfo(); } + if (clusterService.localNode().isMasterNode() == false) { + if (currentMaster == null) { + beginPollingRemoteMasterStabilityDiagnostic(); + } else { + cancelPollingRemoteMasterStabilityDiagnostic(); + } + } } /** @@ -649,7 +674,7 @@ void beginPollingClusterFormationInfo() { Map cancellables = new ConcurrentHashMap<>(); /* * Assignment of clusterFormationInfoTasks must be done before the call to beginPollingClusterFormationInfo because it is used - * asynchronously by rescheduleFetchConsumer, called from beginPollingClusterFormationInfo. + * asynchronously by rescheduleClusterFormationFetchConsumer, called from beginPollingClusterFormationInfo. 
*/ clusterFormationInfoTasks = cancellables; clusterFormationResponses = responses; @@ -676,7 +701,9 @@ void beginPollingClusterFormationInfo( masterEligibleNode, fetchClusterFormationInfo( masterEligibleNode, - responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellables)) + responseConsumer.andThen( + rescheduleClusterFormationFetchConsumer(masterEligibleNode, responseConsumer, cancellables) + ) ) ); } catch (EsRejectedExecutionException e) { @@ -690,14 +717,14 @@ void beginPollingClusterFormationInfo( } /** - * This wraps the responseConsumer in a Consumer that will run rescheduleFetchConsumer() after responseConsumer has - * completed, adding the resulting Cancellable to cancellableConsumer. + * This wraps the responseConsumer in a Consumer that will run rescheduleClusterFormationFetchConsumer() after responseConsumer has + * completed, adding the resulting Cancellable to cancellables. * @param masterEligibleNode The node being polled * @param responseConsumer The response consumer to be wrapped * @param cancellables The Map of Cancellables, one for each node being polled * @return */ - private Consumer rescheduleFetchConsumer( + private Consumer rescheduleClusterFormationFetchConsumer( DiscoveryNode masterEligibleNode, Consumer responseConsumer, Map cancellables @@ -718,15 +745,18 @@ private Consumer responseConsumer + ) { + return sendTransportRequest( + node, + responseConsumer, + ClusterFormationInfoAction.INSTANCE, + new ClusterFormationInfoAction.Request(), + (response, e) -> { + assert response != null || e != null : "a response or an exception must be provided"; + if (response != null) { + return new ClusterFormationStateOrException(response.getClusterFormationState()); + } else { + return new ClusterFormationStateOrException(e); + } + } + ); + } + + void beginPollingRemoteMasterStabilityDiagnostic() { + assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); + AtomicReference cancellableReference = new AtomicReference<>(); + AtomicReference resultReference = new AtomicReference<>(); + remoteCoordinationDiagnosisTask = cancellableReference; + remoteCoordinationDiagnosisResult = resultReference; + beginPollingRemoteMasterStabilityDiagnostic(resultReference::set, cancellableReference); + } + + /** + * This method returns quickly, but in the background schedules to query a remote master node's cluster diagnostics in 10 seconds, and + * repeats doing that until cancelPollingRemoteMasterStabilityDiagnostic() is called. This method + * exists (rather than being just part of the beginPollingRemoteMasterStabilityDiagnostic() above) in order to facilitate + * unit testing. 
+ * @param responseConsumer A consumer for any results produced for a node by this method + * @param cancellableReference The Cancellable reference to assign the current Cancellable for this polling attempt + */ + // Non-private for testing + void beginPollingRemoteMasterStabilityDiagnostic( + Consumer responseConsumer, + AtomicReference cancellableReference + ) { + DiscoveryNode masterEligibleNode = getMasterEligibleNodes().stream().findAny().orElse(null); + try { + cancellableReference.set( + fetchCoordinationDiagnostics( + masterEligibleNode, + responseConsumer.andThen(rescheduleDiagnosticsFetchConsumer(responseConsumer, cancellableReference)) + ) + ); + } catch (EsRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e); + } else { + throw e; + } + } + } + + /** + * This wraps the responseConsumer in a Consumer that will run rescheduleDiagnosticsFetchConsumer() after responseConsumer has + * completed, adding the resulting Cancellable to cancellableReference. + * @param responseConsumer The response consumer to be wrapped + * @param cancellableReference The Cancellable reference to assign the current Cancellable for this polling attempt + * @return A wrapped Consumer that will run fetchCoordinationDiagnostics() + */ + private Consumer rescheduleDiagnosticsFetchConsumer( + Consumer responseConsumer, + AtomicReference cancellableReference + ) { + return response -> { + /* + * If the cancellableReference for this poll attempt is equal to remoteCoordinationDiagnosisTask, then that means that + * this poll attempt is the current one. If they are not equal, that means that + * cancelPollingRemoteMasterStabilityDiagnostic() has been called on this poll attempt but this thread is not yet + * aware. So we cancel the Cancellable in cancellableReference if it is not null. Note that + * remoteCoordinationDiagnosisTask can be null. + */ + if (cancellableReference.equals(remoteCoordinationDiagnosisTask)) { + /* + * Because this is not synchronized with the cancelPollingRemoteMasterStabilityDiagnostic() method, there is a + * slim chance that we will add a task here for a poll that has already been cancelled. But when it completes and runs + * rescheduleDiagnosticsFetchConsumer() we will then see that remoteCoordinationDiagnosisTask does not equal + * cancellableReference, so it will not be run again. + */ + try { + DiscoveryNode masterEligibleNode = getMasterEligibleNodes().stream().findAny().orElse(null); + cancellableReference.set( + fetchCoordinationDiagnostics( + masterEligibleNode, + responseConsumer.andThen(rescheduleDiagnosticsFetchConsumer(responseConsumer, cancellableReference)) + ) + ); + } catch (EsRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e); + } else { + throw e; + } + } + } else { + Scheduler.Cancellable cancellable = cancellableReference.get(); + if (cancellable != null) { + cancellable.cancel(); + } + } + }; + } + + /** + * This method returns quickly, but in the background schedules to query the remote masterEligibleNode's cluster diagnostics in 10 + * seconds unless cancel() is called on the Cancellable that this method returns. + * @param masterEligibleNode The masterEligibleNode to poll for cluster diagnostics. 
This masterEligibleNode can be null in the case + * when there are not yet any master-eligible nodes known to this masterEligibleNode's PeerFinder. + * @param responseConsumer The consumer of the cluster diagnostics for the masterEligibleNode, or the exception encountered while + * contacting it + * @return A Cancellable for the task that is scheduled to fetch cluster diagnostics + */ + private Scheduler.Cancellable fetchCoordinationDiagnostics( + @Nullable DiscoveryNode masterEligibleNode, + Consumer responseConsumer + ) { + return sendTransportRequest( + masterEligibleNode, + responseConsumer, + CoordinationDiagnosticsAction.INSTANCE, + new CoordinationDiagnosticsAction.Request(true), + (response, e) -> { + assert response != null || e != null : "a response or an exception must be provided"; + if (response != null) { + return new RemoteMasterHealthResult(masterEligibleNode, response.getCoordinationDiagnosticsResult(), null); + } else { + return new RemoteMasterHealthResult(masterEligibleNode, null, e); + } + } + ); + } + + /** + * This method connects to masterEligibleNode and sends it a transport request for a response of type R. The response or exception + * are transformed into a common type T with responseToResultFunction or exceptionToResultFunction, and then consumed by + * responseConsumer. This method is meant to be used when there is potentially no elected master node, so it first calls + * connectToNode before sending the request. + * @param masterEligibleNode The master eligible node to be queried, or null if we do not yet know of a master eligible node. + * If this is null, the responseConsumer will be given a null response + * @param responseConsumer The consumer of the transformed response + * @param transportActionType The ActionType for the transport action + * @param transportActionRequest The ActionRequest to be sent + * @param responseTransformationFunction A function that converts a response or exception to the response type expected by the + * responseConsumer + * @return A Cancellable for the task that is scheduled to fetch the remote information + */ + private Scheduler.Cancellable sendTransportRequest( + @Nullable DiscoveryNode masterEligibleNode, + Consumer responseConsumer, + ActionType transportActionType, + ActionRequest transportActionRequest, + BiFunction responseTransformationFunction ) { StepListener connectionListener = new StepListener<>(); - StepListener fetchClusterInfoListener = new StepListener<>(); + StepListener fetchRemoteResultListener = new StepListener<>(); long startTime = System.nanoTime(); connectionListener.whenComplete(releasable -> { - logger.trace("Opened connection to {}, making cluster coordination info request", node); - // If we don't get a response in 10 seconds that is a failure worth capturing on its own: - final TimeValue transportTimeout = TimeValue.timeValueSeconds(10); - transportService.sendRequest( - node, - ClusterFormationInfoAction.NAME, - new ClusterFormationInfoAction.Request(), - TransportRequestOptions.timeout(transportTimeout), - new ActionListenerResponseHandler<>( - ActionListener.runBefore(fetchClusterInfoListener, () -> Releasables.close(releasable)), - ClusterFormationInfoAction.Response::new - ) - ); + if (masterEligibleNode == null) { + responseConsumer.accept(null); + } else { + logger.trace("Opened connection to {}, making transport request", masterEligibleNode); + // If we don't get a response in 10 seconds that is a failure worth capturing on its own: + final TimeValue transportTimeout = 
TimeValue.timeValueSeconds(10); + transportService.sendRequest( + masterEligibleNode, + transportActionType.name(), + transportActionRequest, + TransportRequestOptions.timeout(transportTimeout), + new ActionListenerResponseHandler<>( + ActionListener.runBefore(fetchRemoteResultListener, () -> Releasables.close(releasable)), + transportActionType.getResponseReader() + ) + ); + } }, e -> { - logger.warn("Exception connecting to master node", e); - responseConsumer.accept(new ClusterFormationStateOrException(e)); + logger.warn("Exception connecting to master masterEligibleNode", e); + responseConsumer.accept(responseTransformationFunction.apply(null, e)); }); - fetchClusterInfoListener.whenComplete(response -> { + fetchRemoteResultListener.whenComplete(response -> { long endTime = System.nanoTime(); - logger.trace("Received cluster coordination info from {} in {}", node, TimeValue.timeValueNanos(endTime - startTime)); - responseConsumer.accept(new ClusterFormationStateOrException(response.getClusterFormationState())); + logger.trace("Received remote response from {} in {}", masterEligibleNode, TimeValue.timeValueNanos(endTime - startTime)); + responseConsumer.accept(responseTransformationFunction.apply(response, null)); }, e -> { - logger.warn("Exception in cluster coordination info request to master node", e); - responseConsumer.accept(new ClusterFormationStateOrException(e)); + logger.warn("Exception in remote request to master masterEligibleNode", e); + responseConsumer.accept(responseTransformationFunction.apply(null, e)); }); return transportService.getThreadPool().schedule(() -> { - Version minSupportedVersion = Version.V_8_4_0; - if (node.getVersion().onOrAfter(minSupportedVersion) == false) { // This was introduced in 8.4.0 - logger.trace( - "Cannot get cluster coordination info for {} because it is at version {} and {} is required", - node, - node.getVersion(), - minSupportedVersion - ); + if (masterEligibleNode == null) { + /* + * This node's PeerFinder hasn't yet discovered the master-eligible nodes. By notifying the responseConsumer with a null + * value we effectively do nothing, and allow this request to be recheduled. 
+ */ + responseConsumer.accept(null); } else { - transportService.connectToNode( - // Note: This connection must be explicitly closed in the connectionListener - node, - ConnectionProfile.buildDefaultConnectionProfile(clusterService.getSettings()), - connectionListener - ); + Version minSupportedVersion = Version.V_8_4_0; + if (masterEligibleNode.getVersion().onOrAfter(minSupportedVersion) == false) { + logger.trace( + "Cannot get remote result from {} because it is at version {} and {} is required", + masterEligibleNode, + masterEligibleNode.getVersion(), + minSupportedVersion + ); + } else { + transportService.connectToNode( + // Note: This connection must be explicitly closed in the connectionListener + masterEligibleNode, + ConnectionProfile.buildDefaultConnectionProfile(clusterService.getSettings()), + connectionListener + ); + } } }, remoteRequestInitialDelay, ThreadPool.Names.SAME); } + void cancelPollingRemoteMasterStabilityDiagnostic() { + assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); + if (remoteCoordinationDiagnosisTask != null) { + Scheduler.Cancellable task = remoteCoordinationDiagnosisTask.get(); + if (task != null) { + task.cancel(); + } + remoteCoordinationDiagnosisResult = null; + remoteCoordinationDiagnosisTask = null; + } + } + // Non-private for testing record ClusterFormationStateOrException( ClusterFormationFailureHelper.ClusterFormationState clusterFormationState, @@ -975,4 +1184,7 @@ public void writeTo(StreamOutput out) throws IOException { } } + + // Non-private for testing: + record RemoteMasterHealthResult(DiscoveryNode node, CoordinationDiagnosticsResult result, Exception remoteException) {} } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java index fa05f6e629fff..4205fd7b97099 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java @@ -38,10 +38,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static org.elasticsearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.EXTREME_DELAY_VARIABILITY; @@ -979,6 +981,89 @@ public void testBeginPollingClusterFormationInfoCancel() { } } + public void testBeginPollingRemoteMasterStabilityDiagnostic() throws Exception { + MasterHistoryService masterHistoryService = createMasterHistoryService(); + var clusterService = mock(ClusterService.class); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + when(clusterService.state()).thenReturn(nullMasterClusterState); + DiscoveryNode localNode = new DiscoveryNode( + "node4", + randomNodeId(), + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Set.of(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + when(clusterService.localNode()).thenReturn(localNode); + Coordinator coordinator = mock(Coordinator.class); + when(coordinator.getFoundPeers()).thenReturn(List.of(node1, node2, localNode)); + DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(); + ThreadPool 
threadPool = deterministicTaskQueue.getThreadPool(); + + TransportService transportService = mock(TransportService.class); + when(transportService.getThreadPool()).thenReturn(threadPool); + CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( + clusterService, + transportService, + coordinator, + masterHistoryService + ); + + coordinationDiagnosticsService.beginPollingRemoteMasterStabilityDiagnostic(); + assertNotNull(coordinationDiagnosticsService.remoteCoordinationDiagnosisTask); + assertNotNull(coordinationDiagnosticsService.remoteCoordinationDiagnosisTask.get()); + coordinationDiagnosticsService.cancelPollingRemoteMasterStabilityDiagnostic(); + assertThat(coordinationDiagnosticsService.remoteCoordinationDiagnosisTask, Matchers.nullValue()); + coordinationDiagnosticsService.clusterChanged( + new ClusterChangedEvent(TEST_SOURCE, nullMasterClusterState, node1MasterClusterState) + ); + assertNotNull(coordinationDiagnosticsService.remoteCoordinationDiagnosisTask); + assertNotNull(coordinationDiagnosticsService.remoteCoordinationDiagnosisTask.get()); + coordinationDiagnosticsService.clusterChanged( + new ClusterChangedEvent(TEST_SOURCE, node1MasterClusterState, nullMasterClusterState) + ); + assertThat(coordinationDiagnosticsService.remoteCoordinationDiagnosisTask, Matchers.nullValue()); + /* + * Note that in this test we will never find any values in remoteCoordinationDiagnosisResult because transportService is mocked out. + * There is not a reasonable way to plug in a transportService to this simple unit test, so testing that is left to an + * integration test. + */ + } + + public void testBeginPollingRemoteMasterStabilityDiagnosticCancel() { + /* + * This test sets up a 5-node cluster (3 master eligible). We call beginPollingRemoteMasterStabilityDiagnostic() on each + * non-master-eligible node. But we immediately call cancel, which is what will happen in practice most often since usually the + * master becomes null and then is immediately non-null when a new master is elected. This means that polling will not be started + * since there is a 10-second delay, and we expect no results. 
+ */ + try (Cluster cluster = new Cluster(3, true, Settings.EMPTY)) { + createAndAddNonMasterNode(cluster); + createAndAddNonMasterNode(cluster); + cluster.runRandomly(); + cluster.stabilise(); + List masterNodes = cluster.clusterNodes.stream() + .map(Cluster.ClusterNode::getLocalNode) + .filter(DiscoveryNode::isMasterNode) + .toList(); + cluster.clusterNodes.stream().filter(node -> node.getLocalNode().isMasterNode() == false).forEach(node -> { + List healthResults = new ArrayList<>(); + AtomicReference cancellableReference = new AtomicReference<>(); + node.coordinationDiagnosticsService.beginPollingRemoteMasterStabilityDiagnostic(healthResults::add, cancellableReference); + cancellableReference.get().cancel(); + cluster.runRandomly(false, true, EXTREME_DELAY_VARIABILITY); + cluster.stabilise(); + + /* + * The cluster has now run normally for some period of time, but cancel() was called before polling began, so we expect + * no results: + */ + assertThat(healthResults.size(), equalTo(0)); + }); + + } + } + public void testResultSerialization() { CoordinationDiagnosticsService.CoordinationDiagnosticsStatus status = getRandomStatus(); CoordinationDiagnosticsService.CoordinationDiagnosticsDetails details = getRandomDetails(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index 7beaa3c9456eb..971cf9c57d484 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -16,6 +16,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.coordination.ClusterFormationInfoAction; +import org.elasticsearch.action.admin.cluster.coordination.CoordinationDiagnosticsAction; import org.elasticsearch.action.admin.cluster.coordination.MasterHistoryAction; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction; import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; @@ -1251,6 +1252,12 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { getElectionStrategy(), nodeHealthService ); + coordinationDiagnosticsService = new CoordinationDiagnosticsService( + clusterService, + transportService, + coordinator, + masterHistoryService + ); client.initialize( Map.of( NodesHotThreadsAction.INSTANCE, @@ -1258,7 +1265,13 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { MasterHistoryAction.INSTANCE, new MasterHistoryAction.TransportAction(transportService, new ActionFilters(Set.of()), masterHistoryService), ClusterFormationInfoAction.INSTANCE, - new ClusterFormationInfoAction.TransportAction(transportService, new ActionFilters(Set.of()), coordinator) + new ClusterFormationInfoAction.TransportAction(transportService, new ActionFilters(Set.of()), coordinator), + CoordinationDiagnosticsAction.INSTANCE, + new CoordinationDiagnosticsAction.TransportAction( + transportService, + new ActionFilters(Set.of()), + coordinationDiagnosticsService + ) ), transportService.getTaskManager(), localNode::getId, @@ -1266,12 +1279,6 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { null, getNamedWriteableRegistry() ); - coordinationDiagnosticsService = new CoordinationDiagnosticsService( - clusterService, - transportService, - coordinator, - 
masterHistoryService - ); stableMasterHealthIndicatorService = new StableMasterHealthIndicatorService(coordinationDiagnosticsService); masterService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService( From 226b8a260ec4f56bd6d958b8919887e1e3ee3116 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 8 Aug 2022 15:44:00 +0200 Subject: [PATCH 129/265] [DOCS] Modifies the description of frequency. (#89128) --- docs/reference/rest-api/common-parms.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index e12590e18106c..770ac2e4ffd89 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -313,9 +313,8 @@ end::http-format[] tag::frequency[] The interval between checks for changes in the source indices when the -{transform} is running continuously. Also determines the retry interval in the -event of transient failures while the {transform} is searching or indexing. The -minimum value is `1s` and the maximum is `1h`. The default value is `1m`. +{transform} is running continuously. The minimum value is `1s` and the maximum +is `1h`. The default value is `1m`. end::frequency[] tag::from[] From 760201538496573c8395d43d88ee7d874e357002 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 8 Aug 2022 15:46:29 +0200 Subject: [PATCH 130/265] [DOCS] Improves frequent items aggregation docs (#89122) --- .../frequent-items-aggregation.asciidoc | 50 ++++++++++++++----- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc b/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc index 8eda65232bcde..4690ca9ffea5d 100644 --- a/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc @@ -6,12 +6,11 @@ experimental::[] -A bucket aggregation which finds frequent item sets. It -is a form of association rules mining that identifies items that often occur -together. It also helps you to discover relationships between different data -points (items). Items that are frequently purchased together or log events that -tend to co-occur are examples of frequent item sets. Finding frequent item sets -helps to discover relationships between different data points (items). +A bucket aggregation which finds frequent item sets. It is a form of association +rules mining that identifies items that often occur together. Items that are +frequently purchased together or log events that tend to co-occur are examples +of frequent item sets. Finding frequent item sets helps to discover +relationships between different data points (items). The aggregation reports closed item sets. A frequent item set is called closed if no superset exists with the same ratio of documents (also known as its @@ -23,6 +22,11 @@ Only the second item set (`apple, orange, banana, tomato`) is returned, and the first set – which is a subset of the second one – is skipped. Both item sets might be returned if their support values are different. +The runtime of the aggregation depends on the data and the provided parameters. +It might take a significant time for the aggregation to complete. For this +reason, it is recommended to use <> to run your +requests asynchronously. 
+ ==== Syntax @@ -55,7 +59,8 @@ A `frequent_items` aggregation looks like this in isolation: ==== Fields Supported field types for the analyzed fields are keyword, numeric, ip, date, -and arrays of these types. You can also add runtime fields to your analyzed fields. +and arrays of these types. You can also add runtime fields to your analyzed +fields. If the combined cardinality of the analyzed fields are high, then the aggregation might require a significant amount of system resources. @@ -113,9 +118,12 @@ and (2.) from which cities they make those purchases. We are interested in sets with three or more items, and want to see the first three frequent item sets with the highest support. +Note that we use the <> endpoint in this first +example. + [source,console] ------------------------------------------------- -GET kibana_sample_data_ecommerce /_search +POST /kibana_sample_data_ecommerce /_async_search { "size": 0, "aggs": { @@ -123,7 +131,7 @@ GET kibana_sample_data_ecommerce /_search "frequent_items": { "minimum_set_size": 3, "fields": [ - { "field": "category.keyword" }, + { "field": "category.keyword" }, { "field": "geoip.city_name" } ], "size": 3 @@ -134,6 +142,15 @@ GET kibana_sample_data_ecommerce /_search ------------------------------------------------- // TEST[skip:setup kibana sample data] +The response of the API call above contains an identifier (`id`) of the async +search request. You can use the identifier to retrieve the search results: + +[source,console] +------------------------------------------------- +GET /_async_search/ +------------------------------------------------- +// TEST[skip:setup kibana sample data] + The API returns a response similar to the following one: [source,console-result] @@ -141,9 +158,9 @@ The API returns a response similar to the following one: (...) "aggregations" : { "my_agg" : { - "buckets" : [ + "buckets" : [ <1> { - "key" : { + "key" : { <2> "category.keyword" : [ "Women's Clothing", "Women's Shoes" @@ -152,8 +169,8 @@ The API returns a response similar to the following one: "New York" ] }, - "doc_count" : 217, - "support" : 0.04641711229946524 + "doc_count" : 217, <3> + "support" : 0.04641711229946524 <4> }, { "key" : { @@ -188,6 +205,13 @@ The API returns a response similar to the following one: ------------------------------------------------- // TEST[skip:setup kibana sample data] +<1> The array of returned item sets. +<2> The `key` object contains one item set. In this case, it consists of two +values of the `category.keyword` field and one value of the `geoip.city_name`. +<3> The number of documents that contain the item set. +<4> The support value of the item set. It is calculated by dividing the number +of documents containing the item set by the total number of documents. + The response shows that the categories customers purchase from most frequently together are `Women's Clothing` and `Women's Shoes` and customers from New York tend to buy items from these categories frequently togeher. In other words, From 92dc846ecc1f1f4ca333374ab9815f4089f32aae Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Mon, 8 Aug 2022 17:21:58 +0300 Subject: [PATCH 131/265] [ML] Extract ML tasks into a context class for use in autoscaling decider (#89167) This commit is a refactoring that encapsulates together the ML tasks that are considered when taking autoscaling decisions. 
--- .../ml/autoscaling/MlAutoscalingContext.java | 155 ++++++++++++++ .../MlAutoscalingDeciderService.java | 196 ++++-------------- .../xpack/ml/autoscaling/MlScalingReason.java | 39 +--- .../MlAutoscalingDeciderServiceTests.java | 57 +++-- 4 files changed, 239 insertions(+), 208 deletions(-) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java new file mode 100644 index 0000000000000..7c1d55e20399f --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.autoscaling; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; +import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; +import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams; +import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.ml.MlTasks.getDataFrameAnalyticsState; +import static org.elasticsearch.xpack.core.ml.MlTasks.getJobStateModifiedForReassignments; +import static org.elasticsearch.xpack.core.ml.MlTasks.getSnapshotUpgradeState; +import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; + +class MlAutoscalingContext { + + final Collection> anomalyDetectionTasks; + final Collection> snapshotUpgradeTasks; + final Collection> dataframeAnalyticsTasks; + final Map modelAssignments; + + final List waitingAnomalyJobs; + final List waitingSnapshotUpgrades; + final List waitingAnalyticsJobs; + final List waitingAllocatedModels; + + MlAutoscalingContext() { + anomalyDetectionTasks = List.of(); + snapshotUpgradeTasks = List.of(); + dataframeAnalyticsTasks = List.of(); + modelAssignments = Map.of(); + + waitingAnomalyJobs = List.of(); + waitingSnapshotUpgrades = List.of(); + waitingAnalyticsJobs = List.of(); + waitingAllocatedModels = List.of(); + } + + MlAutoscalingContext(ClusterState clusterState) { + PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + anomalyDetectionTasks = anomalyDetectionTasks(tasks); + snapshotUpgradeTasks = snapshotUpgradeTasks(tasks); + dataframeAnalyticsTasks = dataframeAnalyticsTasks(tasks); + modelAssignments = 
TrainedModelAssignmentMetadata.fromState(clusterState).modelAssignments(); + + waitingAnomalyJobs = anomalyDetectionTasks.stream() + .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) + .map(t -> ((OpenJobAction.JobParams) t.getParams()).getJobId()) + .toList(); + waitingSnapshotUpgrades = snapshotUpgradeTasks.stream() + .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) + .map(t -> ((SnapshotUpgradeTaskParams) t.getParams()).getJobId()) + .toList(); + waitingAnalyticsJobs = dataframeAnalyticsTasks.stream() + .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) + .map(t -> ((StartDataFrameAnalyticsAction.TaskParams) t.getParams()).getId()) + .toList(); + waitingAllocatedModels = modelAssignments.entrySet() + .stream() + // TODO: Eventually care about those that are STARTED but not FULLY_ALLOCATED + .filter(e -> e.getValue().getAssignmentState().equals(AssignmentState.STARTING) && e.getValue().getNodeRoutingTable().isEmpty()) + .map(Map.Entry::getKey) + .toList(); + } + + private static Collection> anomalyDetectionTasks( + PersistentTasksCustomMetadata tasksCustomMetadata + ) { + if (tasksCustomMetadata == null) { + return List.of(); + } + + return tasksCustomMetadata.findTasks(MlTasks.JOB_TASK_NAME, t -> taskStateFilter(getJobStateModifiedForReassignments(t))); + } + + private static Collection> snapshotUpgradeTasks( + PersistentTasksCustomMetadata tasksCustomMetadata + ) { + if (tasksCustomMetadata == null) { + return List.of(); + } + + return tasksCustomMetadata.findTasks(MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, t -> taskStateFilter(getSnapshotUpgradeState(t))); + } + + static Collection> dataframeAnalyticsTasks( + PersistentTasksCustomMetadata tasksCustomMetadata + ) { + if (tasksCustomMetadata == null) { + return List.of(); + } + + return tasksCustomMetadata.findTasks(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, t -> taskStateFilter(getDataFrameAnalyticsState(t))); + } + + private static boolean taskStateFilter(JobState jobState) { + return jobState == null || jobState.isNoneOf(JobState.CLOSED, JobState.FAILED); + } + + private static boolean taskStateFilter(SnapshotUpgradeState snapshotUpgradeState) { + return snapshotUpgradeState == null || snapshotUpgradeState.isNoneOf(SnapshotUpgradeState.STOPPED, SnapshotUpgradeState.FAILED); + } + + private static boolean taskStateFilter(DataFrameAnalyticsState dataFrameAnalyticsState) { + // Don't count stopped and failed df-analytics tasks as they don't consume native memory + return dataFrameAnalyticsState == null + || dataFrameAnalyticsState.isNoneOf(DataFrameAnalyticsState.STOPPED, DataFrameAnalyticsState.FAILED); + } + + public boolean hasWaitingTasks() { + return waitingAnomalyJobs.isEmpty() == false + || waitingSnapshotUpgrades.isEmpty() == false + || waitingAnalyticsJobs.isEmpty() + || waitingAllocatedModels.isEmpty() == false; + } + + public boolean isEmpty() { + return anomalyDetectionTasks.isEmpty() + && snapshotUpgradeTasks.isEmpty() + && dataframeAnalyticsTasks.isEmpty() + && modelAssignments.isEmpty(); + } + + public List findPartiallyAllocatedModels() { + return modelAssignments.entrySet() + .stream() + .filter( + e -> e.getValue() + .calculateAllocationStatus() + .map(AllocationStatus::calculateState) + .orElse(AllocationStatus.State.FULLY_ALLOCATED) + .equals(AllocationStatus.State.FULLY_ALLOCATED) == false + ) + .map(Map.Entry::getKey) + .toList(); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index 178313f474aa0..00c8424405349 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -28,19 +28,10 @@ import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderResult; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderService; import org.elasticsearch.xpack.core.ml.MlTasks; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction.DatafeedParams; -import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; -import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; -import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; -import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; -import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.NodeLoad; import org.elasticsearch.xpack.ml.job.NodeLoadDetector; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; @@ -70,12 +61,8 @@ import static java.time.Instant.ofEpochMilli; import static org.elasticsearch.common.xcontent.XContentElasticsearchExtension.DEFAULT_FORMATTER; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.core.ml.MlTasks.getDataFrameAnalyticsState; -import static org.elasticsearch.xpack.core.ml.MlTasks.getJobStateModifiedForReassignments; -import static org.elasticsearch.xpack.core.ml.MlTasks.getSnapshotUpgradeState; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_OPEN_JOBS_PER_NODE; import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD; -import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; public class MlAutoscalingDeciderService implements AutoscalingDeciderService, LocalNodeMasterListener { @@ -262,30 +249,6 @@ static Optional>> determineUnassignab ); } - private static Collection> anomalyDetectionTasks(PersistentTasksCustomMetadata tasksCustomMetadata) { - if (tasksCustomMetadata == null) { - return List.of(); - } - - return tasksCustomMetadata.findTasks(MlTasks.JOB_TASK_NAME, t -> taskStateFilter(getJobStateModifiedForReassignments(t))); - } - - private static Collection> snapshotUpgradeTasks(PersistentTasksCustomMetadata tasksCustomMetadata) { - if (tasksCustomMetadata == null) { - return List.of(); - } - - return tasksCustomMetadata.findTasks(MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, t -> taskStateFilter(getSnapshotUpgradeState(t))); - } - - private static Collection> dataframeAnalyticsTasks(PersistentTasksCustomMetadata tasksCustomMetadata) { - if (tasksCustomMetadata == null) { - return List.of(); - } - - return tasksCustomMetadata.findTasks(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, t -> taskStateFilter(getDataFrameAnalyticsState(t))); - } - 
@SuppressWarnings("unchecked") private static Collection> datafeedTasks(PersistentTasksCustomMetadata tasksCustomMetadata) { if (tasksCustomMetadata == null) { @@ -388,41 +351,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider final ClusterState clusterState = context.state(); PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - Collection> anomalyDetectionTasks = anomalyDetectionTasks(tasks); - Collection> snapshotUpgradeTasks = snapshotUpgradeTasks(tasks); - Collection> dataframeAnalyticsTasks = dataframeAnalyticsTasks(tasks); - Map modelAssignments = TrainedModelAssignmentMetadata.fromState(clusterState).modelAssignments(); - final List waitingAnomalyJobs = anomalyDetectionTasks.stream() - .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) - .map(t -> ((OpenJobAction.JobParams) t.getParams()).getJobId()) - .toList(); - final List waitingSnapshotUpgrades = snapshotUpgradeTasks.stream() - .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) - .map(t -> ((SnapshotUpgradeTaskParams) t.getParams()).getJobId()) - .toList(); - final List waitingAnalyticsJobs = dataframeAnalyticsTasks.stream() - .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) - .map(t -> ((StartDataFrameAnalyticsAction.TaskParams) t.getParams()).getId()) - .toList(); - final List waitingAllocatedModels = modelAssignments.entrySet() - .stream() - // TODO: Eventually care about those that are STARTED but not FULLY_ALLOCATED - .filter(e -> e.getValue().getAssignmentState().equals(AssignmentState.STARTING) && e.getValue().getNodeRoutingTable().isEmpty()) - .map(Map.Entry::getKey) - .toList(); - // TODO for autoscaling by memory, we only care about if the model is allocated to at least one node (see above) - // We should do this check in our autoscaling by processor count service, which will be a separate decider for readability's sake - final List notFullyAllocatedModels = modelAssignments.entrySet() - .stream() - .filter( - e -> e.getValue() - .calculateAllocationStatus() - .map(AllocationStatus::calculateState) - .orElse(AllocationStatus.State.FULLY_ALLOCATED) - .equals(AllocationStatus.State.FULLY_ALLOCATED) == false - ) - .map(Map.Entry::getKey) - .toList(); + MlAutoscalingContext mlContext = new MlAutoscalingContext(clusterState); final int numAnalyticsJobsInQueue = NUM_ANALYTICS_JOBS_IN_QUEUE.get(configuration); final int numAnomalyJobsInQueue = NUM_ANOMALY_JOBS_IN_QUEUE.get(configuration); @@ -430,11 +359,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider final List mlNodes = getMlNodes(clusterState); final NativeMemoryCapacity currentScale = currentScale(mlNodes); - final MlScalingReason.Builder reasonBuilder = MlScalingReason.builder() - .setWaitingAnomalyJobs(waitingAnomalyJobs) - .setWaitingSnapshotUpgrades(waitingSnapshotUpgrades) - .setWaitingAnalyticsJobs(waitingAnalyticsJobs) - .setWaitingModels(waitingAllocatedModels) + final MlScalingReason.Builder reasonBuilder = MlScalingReason.builder(mlContext) .setCurrentMlCapacity( currentScale.autoscalingCapacity( maxMachineMemoryPercent, @@ -446,27 +371,14 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider .setPassedConfiguration(configuration); // There are no ML nodes, scale up as quick as possible, no matter if memory is stale or not - if (mlNodes.isEmpty() - && (waitingAnomalyJobs.isEmpty() == false - || waitingSnapshotUpgrades.isEmpty() == false - || 
waitingAnalyticsJobs.isEmpty() == false - || waitingAllocatedModels.isEmpty() == false)) { - return scaleUpFromZero( - waitingAnomalyJobs, - waitingSnapshotUpgrades, - waitingAnalyticsJobs, - waitingAllocatedModels, - reasonBuilder - ); + if (mlNodes.isEmpty() && mlContext.hasWaitingTasks()) { + return scaleUpFromZero(mlContext, reasonBuilder); } // We don't need to check anything as there are no tasks // This is a quick path to downscale. // simply return `0` for scale down if delay is satisfied - if (anomalyDetectionTasks.isEmpty() - && snapshotUpgradeTasks.isEmpty() - && dataframeAnalyticsTasks.isEmpty() - && modelAssignments.isEmpty()) { + if (mlContext.isEmpty()) { // We might be in a need zero, have zero situation, in which case it's nicer to pass a "no change" explanation if (currentScale.getTierMlNativeMemoryRequirementExcludingOverhead() == 0 && currentScale.getNodeMlNativeMemoryRequirementExcludingOverhead() == 0) { @@ -542,10 +454,10 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider numAnomalyJobsInQueue, numAnalyticsJobsInQueue, nodeLoads, - waitingAnomalyJobs, - waitingSnapshotUpgrades, - waitingAnalyticsJobs, - waitingAllocatedModels, + mlContext.waitingAnomalyJobs, + mlContext.waitingSnapshotUpgrades, + mlContext.waitingAnalyticsJobs, + mlContext.waitingAllocatedModels, calculateFutureAvailableCapacity(tasks, nodeLoads).orElse(null), currentScale, reasonBuilder @@ -555,10 +467,14 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider return scaleUpDecision.get(); } - if (waitingAnalyticsJobs.isEmpty() == false - || waitingSnapshotUpgrades.isEmpty() == false - || waitingAnomalyJobs.isEmpty() == false - || notFullyAllocatedModels.isEmpty() == false) { + final List partiallyAllocatedModels = mlContext.findPartiallyAllocatedModels(); + + // TODO for autoscaling by memory, we only care about if the model is allocated to at least one node (see above) + // We should do this check in our autoscaling by processor count service, which will be a separate decider for readability's sake + if (mlContext.waitingAnalyticsJobs.isEmpty() == false + || mlContext.waitingSnapshotUpgrades.isEmpty() == false + || mlContext.waitingAnomalyJobs.isEmpty() == false + || partiallyAllocatedModels.isEmpty() == false) { // We don't want to continue to consider a scale down if there are now waiting jobs resetScaleDownCoolDown(); return new AutoscalingDeciderResult( @@ -571,21 +487,16 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider + "[%d] trained models not fully-allocated, " + "but the number in the queue is less than the configured maximum allowed " + "or the queued jobs will eventually be assignable at the current size.", - waitingSnapshotUpgrades.size(), - waitingAnalyticsJobs.size(), - waitingAnomalyJobs.size(), - notFullyAllocatedModels.size() + mlContext.waitingSnapshotUpgrades.size(), + mlContext.waitingAnalyticsJobs.size(), + mlContext.waitingAnomalyJobs.size(), + partiallyAllocatedModels.size() ) ).build() ); } - long maxTaskMemoryBytes = maxMemoryBytes( - anomalyDetectionTasks, - snapshotUpgradeTasks, - dataframeAnalyticsTasks, - modelAssignments.values() - ); + long maxTaskMemoryBytes = maxMemoryBytes(mlContext); // This state is invalid, but may occur due to complex bugs that have slipped through testing. // We could have tasks where the required job memory is 0, which should be impossible. 
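
The refactor above replaces the per-call stream pipelines in scale() with lists that MlAutoscalingContext computes once from the cluster state and then exposes as simple questions (hasWaitingTasks(), isEmpty(), findPartiallyAllocatedModels()). A minimal, self-contained sketch of that pattern follows; every class, record, and field name here is illustrative only and not code from this patch, but the "filter unassigned work into waiting lists, then combine them with boolean checks" shape mirrors the context class introduced above.

    import java.util.List;
    import java.util.Map;

    // Self-contained sketch of the "compute waiting lists once, then ask simple questions" pattern.
    // All names are illustrative; only the shape mirrors MlAutoscalingContext.
    public class WaitingWorkSketch {

        // assignedNode == null stands in for "awaiting lazy assignment"
        record Task(String id, String assignedNode) {}

        public static void main(String[] args) {
            List<Task> anomalyTasks = List.of(new Task("job-1", "node-a"), new Task("job-2", null));
            Map<String, Boolean> modelHasRouting = Map.of("model-1", true, "model-2", false);

            // Waiting anomaly jobs: tasks that have not been assigned to any node yet.
            List<String> waitingAnomalyJobs = anomalyTasks.stream()
                .filter(t -> t.assignedNode() == null)
                .map(Task::id)
                .toList();

            // Waiting models: assignments whose routing table is still empty.
            List<String> waitingModels = modelHasRouting.entrySet().stream()
                .filter(e -> e.getValue() == false)
                .map(Map.Entry::getKey)
                .toList();

            boolean hasWaitingTasks = waitingAnomalyJobs.isEmpty() == false || waitingModels.isEmpty() == false;
            System.out.println("waiting jobs=" + waitingAnomalyJobs
                + ", waiting models=" + waitingModels
                + ", hasWaitingTasks=" + hasWaitingTasks);
        }
    }
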
@@ -596,17 +507,14 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider if (maxTaskMemoryBytes == 0L) { // We shouldn't need to check this condition because it's the exact opposite of the condition that // would have sent us down the scale down to zero branch higher up this method. - assert anomalyDetectionTasks.isEmpty() == false - || snapshotUpgradeTasks.isEmpty() == false - || dataframeAnalyticsTasks.isEmpty() == false - || modelAssignments.isEmpty() == false : "No tasks or models at all should have put us in the scale down to zero branch"; + assert mlContext.isEmpty() == false : "No tasks or models at all should have put us in the scale down to zero branch"; logger.warn( "The calculated minimum required node size was unexpectedly [0] as there are [{}] anomaly job tasks, " + "[{}] model snapshot upgrade tasks, [{}] data frame analytics tasks and [{}] model assignments", - anomalyDetectionTasks.size(), - snapshotUpgradeTasks.size(), - dataframeAnalyticsTasks.size(), - modelAssignments.size() + mlContext.anomalyDetectionTasks.size(), + mlContext.snapshotUpgradeTasks.size(), + mlContext.dataframeAnalyticsTasks.size(), + mlContext.modelAssignments.size() ); // This next message could obviously be pretty big, but should only get logged very rarely as it // requires both debug enabled and some other bug to exist to cause us to be in this branch @@ -639,7 +547,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider return null; } // TODO we should remove this when we can auto-scale (down and up) via a new CPU auto-scaling decider - if (modelAssignmentsRequireMoreThanHalfCpu(modelAssignments.values(), mlNodes)) { + if (modelAssignmentsRequireMoreThanHalfCpu(mlContext.modelAssignments.values(), mlNodes)) { logger.debug("not down-scaling; model assignments require more than half of the ML tier's allocated processors"); return null; } @@ -706,14 +614,9 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider ); } - private long maxMemoryBytes( - Collection> anomalyDetectionTasks, - Collection> snapshotUpgradeTasks, - Collection> dataframeAnalyticsTasks, - Collection modelAssignments - ) { + private long maxMemoryBytes(MlAutoscalingContext mlContext) { long maxMemoryBytes = Math.max( - anomalyDetectionTasks.stream() + mlContext.anomalyDetectionTasks.stream() .filter(PersistentTask::isAssigned) // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used .mapToLong(t -> { @@ -723,7 +626,7 @@ private long maxMemoryBytes( }) .max() .orElse(0L), - snapshotUpgradeTasks.stream() + mlContext.snapshotUpgradeTasks.stream() .filter(PersistentTask::isAssigned) // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used .mapToLong(t -> { @@ -736,7 +639,7 @@ private long maxMemoryBytes( ); maxMemoryBytes = Math.max( maxMemoryBytes, - dataframeAnalyticsTasks.stream() + mlContext.dataframeAnalyticsTasks.stream() .filter(PersistentTask::isAssigned) // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used .mapToLong(t -> { @@ -749,7 +652,7 @@ private long maxMemoryBytes( ); maxMemoryBytes = Math.max( maxMemoryBytes, - modelAssignments.stream().mapToLong(t -> t.getTaskParams().estimateMemoryUsageBytes()).max().orElse(0L) + mlContext.modelAssignments.values().stream().mapToLong(t -> t.getTaskParams().estimateMemoryUsageBytes()).max().orElse(0L) ); return 
maxMemoryBytes; } @@ -803,30 +706,24 @@ static boolean modelAssignmentsRequireMoreThanHalfCpu(Collection waitingAnomalyJobs, - List waitingSnapshotUpgrades, - List waitingAnalyticsJobs, - List waitingAllocatedModels, - MlScalingReason.Builder reasonBuilder - ) { + AutoscalingDeciderResult scaleUpFromZero(MlAutoscalingContext mlContext, MlScalingReason.Builder reasonBuilder) { final Optional analyticsCapacity = requiredCapacityExcludingPerNodeOverheadForUnassignedJobs( - waitingAnalyticsJobs, + mlContext.waitingAnalyticsJobs, this::getAnalyticsMemoryRequirement, 0 ); final Optional anomalyCapacity = requiredCapacityExcludingPerNodeOverheadForUnassignedJobs( - waitingAnomalyJobs, + mlContext.waitingAnomalyJobs, this::getAnomalyMemoryRequirement, 0 ); final Optional snapshotUpgradeCapacity = requiredCapacityExcludingPerNodeOverheadForUnassignedJobs( - waitingSnapshotUpgrades, + mlContext.waitingSnapshotUpgrades, this::getAnomalyMemoryRequirement, 0 ); final Optional allocatedModelCapacity = requiredCapacityExcludingPerNodeOverheadForUnassignedJobs( - waitingAllocatedModels, + mlContext.waitingAllocatedModels, this::getAllocatedModelRequirement, 0 ); @@ -1141,7 +1038,8 @@ Optional calculateFutureAvailableCapacity(PersistentTasksC final List> jobsWithLookbackDatafeeds = datafeedTasks(tasks).stream() .filter(t -> t.getParams().getEndTime() != null && t.getExecutorNode() != null) .toList(); - final List> assignedAnalyticsJobs = dataframeAnalyticsTasks(tasks).stream() + final List> assignedAnalyticsJobs = MlAutoscalingContext.dataframeAnalyticsTasks(tasks) + .stream() .filter(t -> t.getExecutorNode() != null) .toList(); @@ -1274,18 +1172,4 @@ public List> deciderSettings() { public List roles() { return List.of(DiscoveryNodeRole.ML_ROLE); } - - private static boolean taskStateFilter(JobState jobState) { - return jobState == null || jobState.isNoneOf(JobState.CLOSED, JobState.FAILED); - } - - private static boolean taskStateFilter(SnapshotUpgradeState snapshotUpgradeState) { - return snapshotUpgradeState == null || snapshotUpgradeState.isNoneOf(SnapshotUpgradeState.STOPPED, SnapshotUpgradeState.FAILED); - } - - private static boolean taskStateFilter(DataFrameAnalyticsState dataFrameAnalyticsState) { - // Don't count stopped and failed df-analytics tasks as they don't consume native memory - return dataFrameAnalyticsState == null - || dataFrameAnalyticsState.isNoneOf(DataFrameAnalyticsState.STOPPED, DataFrameAnalyticsState.FAILED); - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java index c42f3dc83e52f..c5fb0d22e439a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java @@ -86,8 +86,8 @@ public MlScalingReason(StreamInput in) throws IOException { this.simpleReason = ExceptionsHelper.requireNonNull(simpleReason, REASON); } - public static Builder builder() { - return new Builder(); + public static Builder builder(MlAutoscalingContext mlContext) { + return new Builder(mlContext); } @Override @@ -175,10 +175,7 @@ public boolean isFragment() { } static class Builder { - private List waitingAnalyticsJobs = Collections.emptyList(); - private List waitingAnomalyJobs = Collections.emptyList(); - private List waitingSnapshotUpgrades = Collections.emptyList(); - private List waitingModels = Collections.emptyList(); + 
private final MlAutoscalingContext mlContext; private Settings passedConfiguration; private Long largestWaitingAnalyticsJob; private Long largestWaitingAnomalyJob; @@ -186,27 +183,11 @@ static class Builder { private AutoscalingCapacity requiredCapacity; private String simpleReason; - public Builder setWaitingAnalyticsJobs(List waitingAnalyticsJobs) { - this.waitingAnalyticsJobs = waitingAnalyticsJobs; - return this; - } - - public Builder setWaitingAnomalyJobs(List waitingAnomalyJobs) { - this.waitingAnomalyJobs = waitingAnomalyJobs; - return this; - } - - public Builder setWaitingSnapshotUpgrades(List waitingSnapshotUpgrades) { - this.waitingSnapshotUpgrades = waitingSnapshotUpgrades; - return this; - } - - public Builder setWaitingModels(List waitingModels) { - this.waitingModels = waitingModels; - return this; + Builder(MlAutoscalingContext mlContext) { + this.mlContext = Objects.requireNonNull(mlContext); } - public Builder setPassedConfiguration(Settings passedConfiguration) { + Builder setPassedConfiguration(Settings passedConfiguration) { this.passedConfiguration = passedConfiguration; return this; } @@ -238,10 +219,10 @@ public Builder setRequiredCapacity(AutoscalingCapacity requiredCapacity) { public MlScalingReason build() { return new MlScalingReason( - waitingAnalyticsJobs, - waitingAnomalyJobs, - waitingSnapshotUpgrades, - waitingModels, + mlContext.waitingAnalyticsJobs, + mlContext.waitingAnomalyJobs, + mlContext.waitingSnapshotUpgrades, + mlContext.waitingAllocatedModels, passedConfiguration, largestWaitingAnalyticsJob, largestWaitingAnomalyJob, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java index 4b03d38b01419..eefdc992f9c45 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java @@ -195,7 +195,9 @@ public void testScalingEdgeCase() { .incNumAssignedAnomalyDetectorJobs() .build() ); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ) .setCurrentMlCapacity( AutoscalingCapacity.builder() .node(null, AUTO_NODE_TIERS_NO_MONITORING.get(0).v1(), null) @@ -244,7 +246,7 @@ public void testScalingEdgeCase() { .incNumAssignedAnomalyDetectorJobs() .build() ); - reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) + reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration(Settings.EMPTY) .setCurrentMlCapacity(AutoscalingCapacity.builder().node(null, 2147483648L, null).total(null, 2147483648L, null).build()); AutoscalingDeciderResult result = service.checkForScaleDown( nodeForScaleDown, @@ -318,7 +320,7 @@ public void testScaleStability() { List.of(), null, new NativeMemoryCapacity(lowerTierMemoryForMl, lowerTierMemoryForMl, lowerTierJvmSize), - new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) + new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration(Settings.EMPTY) .setCurrentMlCapacity( AutoscalingCapacity.builder().node(null, lowerTierNodeSize, null).total(null, lowerTierNodeSize, null).build() ) @@ -351,7 +353,7 @@ 
public void testScaleStability() { List.of(nodeLoadForScaleDown), maxJobSize, new NativeMemoryCapacity(scaledUpBytesForMl, scaledUpBytesForMl, scaledUpJvmSize), - new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) + new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration(Settings.EMPTY) .setCurrentMlCapacity( AutoscalingCapacity.builder().node(null, scaledUpSize, null).total(null, scaledUpSize, null).build() ) @@ -395,7 +397,7 @@ public void testScaleUp_withNoJobsWaitingNoMlNodes() { List.of(), null, NativeMemoryCapacity.ZERO, // current scale when there are no ML nodes - MlScalingReason.builder() + MlScalingReason.builder(new MlAutoscalingContext()) ), equalTo(Optional.empty()) ); @@ -422,8 +424,9 @@ public void testScaleUp_withWaitingJobsAndAutoMemoryAndNoRoomInNodes() { .build() ); NativeMemoryCapacity currentScale = new NativeMemoryCapacity(anomalyDetectorJobSize.getBytes(), anomalyDetectorJobSize.getBytes()); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); MlAutoscalingDeciderService service = buildService(); service.setUseAuto(true); { // No time in queue @@ -544,8 +547,9 @@ public void testScaleUp_withWaitingSnapshotUpgradesAndAutoMemoryAndNoRoomInNodes .build() ); NativeMemoryCapacity currentScale = new NativeMemoryCapacity(ByteSizeValue.ofGb(1).getBytes(), ByteSizeValue.ofGb(1).getBytes()); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); MlAutoscalingDeciderService service = buildService(); service.setUseAuto(true); { // No time in queue @@ -643,8 +647,9 @@ public void testScaleUp_withWaitingSnapshotUpgradesAndAutoMemoryAndNoRoomInNodes public void testScaleUp_withWaitingJobsAndRoomInNodes() { List jobTasks = List.of("waiting_job", "waiting_job_2"); List analytics = List.of("analytics_waiting"); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); // Two small nodes in cluster, so simulate two availability zones when(nodeAvailabilityZoneMapper.getNumMlAvailabilityZones()).thenReturn(OptionalInt.of(2)); List nodesWithRoom = List.of( @@ -722,8 +727,9 @@ public void testScaleUp_withWaitingJobsAndRoomInNodes() { public void testScaleUp_withWaitingJobsAndNoRoomInNodes() { List jobTasks = List.of("waiting_job", "waiting_job_2"); List analytics = List.of("analytics_waiting"); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); List fullyLoadedNode = List.of( 
NodeLoad.builder("any") .setMaxMemory(ByteSizeValue.ofGb(1).getBytes() + PER_NODE_OVERHEAD) @@ -814,8 +820,9 @@ public void testScaleUp_withWaitingJobsAndNoRoomInNodes() { public void testScaleUp_withWaitingJobsAndSomeRoomInNodes() { List jobTasks = List.of("waiting_job"); List analytics = List.of("analytics_waiting"); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); List nearlyFullyLoadedNode = List.of( // Free space on this node is _nearly_ enough for another job but not quite NodeLoad.builder("any") @@ -900,8 +907,9 @@ public void testScaleUp_withWaitingJobsAndSomeRoomInNodes() { public void testScaleUp_withWaitingJobs_WithFutureCapacity() { List jobTasks = List.of("waiting_job", "waiting_job_2"); List analytics = List.of("analytics_waiting"); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); List fullyLoadedNode = List.of( NodeLoad.builder("any") .setMaxMemory(ByteSizeValue.ofGb(1).getBytes()) @@ -980,8 +988,9 @@ public void testScaleUp_withWaitingModelAndAutoMemoryAndNoRoomInNodes() { .build() ); NativeMemoryCapacity currentScale = new NativeMemoryCapacity(ByteSizeValue.ofGb(1).getBytes(), ByteSizeValue.ofGb(1).getBytes()); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); MlAutoscalingDeciderService service = buildService(); service.setUseAuto(true); Optional decision = service.checkForScaleUp( @@ -1015,8 +1024,9 @@ public void testScaleUp_withWaitingModelAndAutoMemoryAndNoRoomInNodes() { } public void testScaleUp_withWaitingModelsAndRoomInNodes() { - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); // Two small nodes in cluster, so simulate two availability zones when(nodeAvailabilityZoneMapper.getNumMlAvailabilityZones()).thenReturn(OptionalInt.of(2)); List nodesWithRoom = List.of( @@ -1068,8 +1078,9 @@ public void testScaleDown() { when(nodeAvailabilityZoneMapper.getNumMlAvailabilityZones()).thenReturn(OptionalInt.of(3)); MlAutoscalingDeciderService service = buildService(); service.setMaxMachineMemoryPercent(25); - MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder().setPassedConfiguration(Settings.EMPTY) - .setCurrentMlCapacity(AutoscalingCapacity.ZERO); + MlScalingReason.Builder reasonBuilder = new MlScalingReason.Builder(new MlAutoscalingContext()).setPassedConfiguration( + Settings.EMPTY + ).setCurrentMlCapacity(AutoscalingCapacity.ZERO); { // Current capacity allows for 
smaller node List nodeLoads = List.of( NodeLoad.builder("foo") From cfad420cde4b576faf8a857db54afeee160eaf88 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 8 Aug 2022 11:14:26 -0400 Subject: [PATCH 132/265] Enable BloomFilter for _id of non-datastream indices (#88409) This PR adds BloomFilter to Elasticsearch and enables it for the _id field of non-data stream indices. BloomFilter should speed up the performance of mget and update requests at a small expense of refresh, merge, and storage. --- docs/changelog/88409.yaml | 5 + .../indices/IndexingMemoryControllerIT.java | 3 +- server/src/main/java/module-info.java | 3 + .../common/settings/IndexScopedSettings.java | 1 + .../elasticsearch/index/IndexSettings.java | 12 + .../index/codec/CodecService.java | 7 +- .../index/codec/PerFieldMapperCodec.java | 29 +- .../ES85BloomFilterPostingsFormat.java | 568 ++++++++++++++++++ .../elasticsearch/index/shard/IndexShard.java | 2 +- .../index/store/LuceneFilesExtensions.java | 3 + .../org.apache.lucene.codecs.PostingsFormat | 1 + .../elasticsearch/index/codec/CodecTests.java | 3 +- .../ES85BloomFilterPostingsFormatTests.java | 127 ++++ .../index/engine/InternalEngineTests.java | 6 +- .../mapper/CompletionFieldMapperTests.java | 3 +- .../vectors/DenseVectorFieldMapperTests.java | 3 +- .../index/shard/IndexShardTests.java | 3 +- .../index/shard/RefreshListenersTests.java | 2 +- .../IndexingMemoryControllerTests.java | 3 +- .../index/engine/EngineTestCase.java | 16 +- .../elasticsearch/test/ESIntegTestCase.java | 4 +- .../action/TransportResumeFollowAction.java | 3 +- .../index/engine/FollowingEngineTests.java | 2 +- 23 files changed, 781 insertions(+), 28 deletions(-) create mode 100644 docs/changelog/88409.yaml create mode 100644 server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java create mode 100644 server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat create mode 100644 server/src/test/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormatTests.java diff --git a/docs/changelog/88409.yaml b/docs/changelog/88409.yaml new file mode 100644 index 0000000000000..9904c14409779 --- /dev/null +++ b/docs/changelog/88409.yaml @@ -0,0 +1,5 @@ +pr: 88409 +summary: Enable `BloomFilter` for `_id` of non-datastream indices +area: Search +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index f6434ad393238..7476d31ead9b5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -62,7 +63,7 @@ EngineConfig engineConfigWithLargerIndexingMemory(EngineConfig config) { config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), diff --git a/server/src/main/java/module-info.java 
b/server/src/main/java/module-info.java index 81f530a4e2c1c..28391730f7e14 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -224,6 +224,7 @@ exports org.elasticsearch.index.cache.query; exports org.elasticsearch.index.cache.request; exports org.elasticsearch.index.codec; + exports org.elasticsearch.index.codec.bloomfilter; exports org.elasticsearch.index.engine; exports org.elasticsearch.index.fielddata; exports org.elasticsearch.index.fielddata.fieldcomparator; @@ -362,4 +363,6 @@ org.elasticsearch.index.shard.ShardToolCliProvider; uses org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider; + + provides org.apache.lucene.codecs.PostingsFormat with org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 51d48987a8876..70f3110d5cf8e 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -162,6 +162,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS, ShardLimitValidator.INDEX_SETTING_SHARD_LIMIT_GROUP, DataTier.TIER_PREFERENCE_SETTING, + IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 58253c3c48032..e323cfe2e49de 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -455,6 +455,18 @@ public final class IndexSettings { Setting.Property.IndexScope ); + /** + * This index setting is intentionally undocumented and should be used as an escape hatch to disable BloomFilter of the + * _id field of non-data-stream indices, which is enabled by default. This setting doesn't affect data-stream indices. + */ + public static final Setting BLOOM_FILTER_ID_FIELD_ENABLED_SETTING = Setting.boolSetting( + "index.bloom_filter_for_id_field.enabled", + true, + Setting.Property.Dynamic, + Setting.Property.IndexScope, + Property.DeprecatedWarning + ); + /** * Is the {@code index.mode} enabled? It should only be enbaled if you * pass a jvm parameter or are running a snapshot build. diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java index bd9dfeca6d694..e3027bb5e0e3c 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -10,6 +10,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene92.Lucene92Codec; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.MapperService; @@ -31,14 +32,14 @@ public class CodecService { /** the raw unfiltered lucene default. 
useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; - public CodecService(@Nullable MapperService mapperService) { + public CodecService(@Nullable MapperService mapperService, BigArrays bigArrays) { final var codecs = new HashMap(); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene92Codec()); codecs.put(BEST_COMPRESSION_CODEC, new Lucene92Codec(Lucene92Codec.Mode.BEST_COMPRESSION)); } else { - codecs.put(DEFAULT_CODEC, new PerFieldMapperCodec(Lucene92Codec.Mode.BEST_SPEED, mapperService)); - codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMapperCodec(Lucene92Codec.Mode.BEST_COMPRESSION, mapperService)); + codecs.put(DEFAULT_CODEC, new PerFieldMapperCodec(Lucene92Codec.Mode.BEST_SPEED, mapperService, bigArrays)); + codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMapperCodec(Lucene92Codec.Mode.BEST_COMPRESSION, mapperService, bigArrays)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java index 517280dce73a2..9f39b05d15f0f 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java @@ -15,6 +15,10 @@ import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.apache.lucene.codecs.lucene92.Lucene92Codec; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; @@ -31,24 +35,39 @@ public class PerFieldMapperCodec extends Lucene92Codec { private final MapperService mapperService; private final DocValuesFormat docValuesFormat = new Lucene90DocValuesFormat(); + private final ES85BloomFilterPostingsFormat bloomFilterPostingsFormat; static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMapperCodec.class) : "PerFieldMapperCodec must subclass the latest " + "lucene codec: " + Lucene.LATEST_CODEC; } - public PerFieldMapperCodec(Mode compressionMode, MapperService mapperService) { + public PerFieldMapperCodec(Mode compressionMode, MapperService mapperService, BigArrays bigArrays) { super(compressionMode); this.mapperService = mapperService; + this.bloomFilterPostingsFormat = new ES85BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); } @Override public PostingsFormat getPostingsFormatForField(String field) { - PostingsFormat format = mapperService.mappingLookup().getPostingsFormat(field); - if (format == null) { - return super.getPostingsFormatForField(field); + if (useBloomFilter(field)) { + return bloomFilterPostingsFormat; } - return format; + return internalGetPostingsFormatForField(field); + } + + private PostingsFormat internalGetPostingsFormatForField(String field) { + final PostingsFormat format = mapperService.mappingLookup().getPostingsFormat(field); + if (format != null) { + return format; + } + return super.getPostingsFormatForField(field); + } + + private boolean useBloomFilter(String field) { + return IdFieldMapper.NAME.equals(field) + && 
mapperService.mappingLookup().isDataStreamTimestampFieldEnabled() == false + && IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING.get(mapperService.getIndexSettings().getSettings()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java new file mode 100644 index 0000000000000..50b750823ee3a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java @@ -0,0 +1,568 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.bloomfilter; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.index.BaseTermsEnum; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.FilterLeafReader; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.TermState; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.store.ChecksumIndexInput; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.AttributeSource; +import org.apache.lucene.util.BitUtil; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ByteArray; +import org.elasticsearch.core.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; + +/** + * This implementation is forked from Lucene's BloomFilterPosting to support on-disk bloom filters. + *

+ * A {@link PostingsFormat} useful for low doc-frequency fields such as primary keys. Bloom filters + * offers "fast-fail" for reads in segments known to have no record of the key. + */ +public class ES85BloomFilterPostingsFormat extends PostingsFormat { + static final String BLOOM_CODEC_NAME = "ES85BloomFilter"; + static final int VERSION_START = 0; + static final int VERSION_CURRENT = VERSION_START; + static final String BLOOM_FILTER_META_FILE = "bfm"; + static final String BLOOM_FILTER_INDEX_FILE = "bfi"; + + private Function postingsFormats; + private BigArrays bigArrays; + + public ES85BloomFilterPostingsFormat(BigArrays bigArrays, Function postingsFormats) { + this(); + this.bigArrays = Objects.requireNonNull(bigArrays); + this.postingsFormats = Objects.requireNonNull(postingsFormats); + } + + public ES85BloomFilterPostingsFormat() { + super(BLOOM_CODEC_NAME); + } + + @Override + public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + if (postingsFormats == null || bigArrays == null) { + assert false : BLOOM_CODEC_NAME + " was initialized with a wrong constructor"; + throw new UnsupportedOperationException(BLOOM_CODEC_NAME + " was initialized with a wrong constructor"); + } + return new FieldsWriter(state); + } + + @Override + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + return new FieldsReader(state); + } + + @Override + public String toString() { + return BLOOM_CODEC_NAME; + } + + private static String metaFile(SegmentInfo si, String segmentSuffix) { + return IndexFileNames.segmentFileName(si.name, segmentSuffix, BLOOM_FILTER_META_FILE); + } + + private static String indexFile(SegmentInfo si, String segmentSuffix) { + return IndexFileNames.segmentFileName(si.name, segmentSuffix, BLOOM_FILTER_INDEX_FILE); + } + + final class FieldsWriter extends FieldsConsumer { + private final SegmentWriteState state; + private final IndexOutput indexOut; + private final List bloomFilters = new ArrayList<>(); + private final List fieldsGroups = new ArrayList<>(); + private final List toCloses = new ArrayList<>(); + private boolean closed; + + FieldsWriter(SegmentWriteState state) throws IOException { + this.state = state; + boolean success = false; + try { + indexOut = state.directory.createOutput(indexFile(state.segmentInfo, state.segmentSuffix), state.context); + toCloses.add(indexOut); + CodecUtil.writeIndexHeader(indexOut, BLOOM_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(toCloses); + } + } + } + + @Override + public void write(Fields fields, NormsProducer norms) throws IOException { + writePostings(fields, norms); + writeBloomFilters(fields); + } + + private void writePostings(Fields fields, NormsProducer norms) throws IOException { + final Map currentGroups = new HashMap<>(); + for (String field : fields) { + final PostingsFormat postingsFormat = postingsFormats.apply(field); + if (postingsFormat == null) { + throw new IllegalStateException("PostingsFormat for field [" + field + "] wasn't specified"); + } + FieldsGroup group = currentGroups.get(postingsFormat); + if (group == null) { + group = new FieldsGroup(postingsFormat, Integer.toString(fieldsGroups.size()), new ArrayList<>()); + currentGroups.put(postingsFormat, group); + fieldsGroups.add(group); + } + group.fields.add(field); + } + for (FieldsGroup group : currentGroups.values()) { + final FieldsConsumer writer = 
group.postingsFormat.fieldsConsumer(new SegmentWriteState(state, group.suffix)); + toCloses.add(writer); + final Fields maskedFields = new FilterLeafReader.FilterFields(fields) { + @Override + public Iterator iterator() { + return group.fields.iterator(); + } + }; + writer.write(maskedFields, norms); + } + } + + private void writeBloomFilters(Fields fields) throws IOException { + for (String field : fields) { + final Terms terms = fields.terms(field); + if (terms == null) { + continue; + } + final int bloomFilterSize = bloomFilterSize(state.segmentInfo.maxDoc()); + final int numBytes = numBytesForBloomFilter(bloomFilterSize); + try (ByteArray buffer = bigArrays.newByteArray(numBytes)) { + final TermsEnum termsEnum = terms.iterator(); + while (true) { + final BytesRef term = termsEnum.next(); + if (term == null) { + break; + } + final int hash = hashTerm(term) % bloomFilterSize; + final int pos = hash >> 3; + final int mask = 1 << (hash & 0x7); + final byte val = (byte) (buffer.get(pos) | mask); + buffer.set(pos, val); + } + bloomFilters.add(new BloomFilter(field, indexOut.getFilePointer(), bloomFilterSize)); + final BytesReference bytes = BytesReference.fromByteArray(buffer, numBytes); + bytes.writeTo(new IndexOutputOutputStream(indexOut)); + } + } + } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + closed = true; + try { + CodecUtil.writeFooter(indexOut); + } finally { + IOUtils.close(toCloses); + } + try (IndexOutput metaOut = state.directory.createOutput(metaFile(state.segmentInfo, state.segmentSuffix), state.context)) { + CodecUtil.writeIndexHeader(metaOut, BLOOM_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + // write postings formats + metaOut.writeVInt(fieldsGroups.size()); + for (FieldsGroup group : fieldsGroups) { + group.writeTo(metaOut, state.fieldInfos); + } + // Write bloom filters + metaOut.writeVInt(bloomFilters.size()); + for (BloomFilter bloomFilter : bloomFilters) { + bloomFilter.writeTo(metaOut, state.fieldInfos); + } + CodecUtil.writeFooter(metaOut); + } + } + } + + private record BloomFilter(String field, long startFilePointer, int bloomFilterSize) { + void writeTo(IndexOutput out, FieldInfos fieldInfos) throws IOException { + out.writeVInt(fieldInfos.fieldInfo(field).number); + out.writeVLong(startFilePointer); + out.writeVInt(bloomFilterSize); + } + + static BloomFilter readFrom(IndexInput in, FieldInfos fieldInfos) throws IOException { + final String fieldName = fieldInfos.fieldInfo(in.readVInt()).name; + final long startFilePointer = in.readVLong(); + final int bloomFilterSize = in.readVInt(); + return new BloomFilter(fieldName, startFilePointer, bloomFilterSize); + } + } + + private record FieldsGroup(PostingsFormat postingsFormat, String suffix, List fields) { + void writeTo(IndexOutput out, FieldInfos fieldInfos) throws IOException { + out.writeString(postingsFormat.getName()); + out.writeString(suffix); + out.writeVInt(fields.size()); + for (String field : fields) { + out.writeVInt(fieldInfos.fieldInfo(field).number); + + } + } + + static FieldsGroup readFrom(IndexInput in, FieldInfos fieldInfos) throws IOException { + final PostingsFormat postingsFormat = forName(in.readString()); + final String suffix = in.readString(); + final int numFields = in.readVInt(); + final List fields = new ArrayList<>(); + for (int i = 0; i < numFields; i++) { + fields.add(fieldInfos.fieldInfo(in.readVInt()).name); + } + return new FieldsGroup(postingsFormat, suffix, fields); + } + } + + static final class 
FieldsReader extends FieldsProducer { + private final Map bloomFilters; + private final List toCloses = new ArrayList<>(); + private final Map readerMap = new HashMap<>(); + private final IndexInput indexIn; + + FieldsReader(SegmentReadState state) throws IOException { + boolean success = false; + try ( + ChecksumIndexInput metaIn = state.directory.openChecksumInput( + metaFile(state.segmentInfo, state.segmentSuffix), + IOContext.READONCE + ) + ) { + CodecUtil.checkIndexHeader( + metaIn, + BLOOM_CODEC_NAME, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + // read postings formats + final int numFieldsGroups = metaIn.readVInt(); + for (int i = 0; i < numFieldsGroups; i++) { + final FieldsGroup group = FieldsGroup.readFrom(metaIn, state.fieldInfos); + final FieldsProducer reader = group.postingsFormat.fieldsProducer(new SegmentReadState(state, group.suffix)); + toCloses.add(reader); + for (String field : group.fields) { + readerMap.put(field, reader); + } + } + // read bloom filters + final int numBloomFilters = metaIn.readVInt(); + bloomFilters = new HashMap<>(numBloomFilters); + for (int i = 0; i < numBloomFilters; i++) { + final BloomFilter bloomFilter = BloomFilter.readFrom(metaIn, state.fieldInfos); + assert bloomFilter.bloomFilterSize == bloomFilterSize(state.segmentInfo.maxDoc()) + : "bloom_filter=" + bloomFilter + ", max_docs=" + state.segmentInfo.maxDoc(); + bloomFilters.put(bloomFilter.field, bloomFilter); + } + CodecUtil.checkFooter(metaIn); + indexIn = state.directory.openInput(indexFile(state.segmentInfo, state.segmentSuffix), state.context); + toCloses.add(indexIn); + CodecUtil.checkIndexHeader( + indexIn, + BLOOM_CODEC_NAME, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + CodecUtil.retrieveChecksum(indexIn); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(toCloses); + } + } + } + + @Override + public Iterator iterator() { + return readerMap.keySet().iterator(); + } + + @Override + public void close() throws IOException { + IOUtils.close(toCloses); + } + + @Override + public Terms terms(String field) throws IOException { + final FieldsProducer reader = readerMap.get(field); + if (reader == null) { + return null; + } + final Terms terms = reader.terms(field); + if (terms == null) { + return null; + } + final BloomFilter bloomFilter = bloomFilters.get(field); + if (bloomFilter != null) { + final RandomAccessInput data = indexIn.randomAccessSlice( + bloomFilter.startFilePointer(), + numBytesForBloomFilter(bloomFilter.bloomFilterSize) + ); + return new BloomFilterTerms(terms, data, bloomFilter.bloomFilterSize); + } else { + return terms; + } + } + + @Override + public int size() { + return readerMap.size(); + } + + @Override + public void checkIntegrity() throws IOException { + // already fully checked the meta file; let's fully checked the index file. 
+ CodecUtil.checksumEntireFile(indexIn); + // multiple fields can share the same reader + final Set seenReaders = new HashSet<>(); + for (FieldsProducer reader : readerMap.values()) { + if (seenReaders.add(reader)) { + reader.checkIntegrity(); + } + } + } + } + + private static class BloomFilterTerms extends FilterLeafReader.FilterTerms { + private final RandomAccessInput data; + private final int bloomFilterSize; + + BloomFilterTerms(Terms in, RandomAccessInput data, int bloomFilterSize) { + super(in); + this.data = data; + this.bloomFilterSize = bloomFilterSize; + } + + private boolean mayContainTerm(BytesRef term) throws IOException { + final int hash = hashTerm(term) % bloomFilterSize; + final int pos = hash >> 3; + final int mask = 1 << (hash & 0x7); + final byte bits = data.readByte(pos); + return (bits & mask) != 0; + } + + @Override + public TermsEnum iterator() throws IOException { + return new LazyFilterTermsEnum() { + private TermsEnum delegate; + + @Override + TermsEnum getDelegate() throws IOException { + if (delegate == null) { + delegate = in.iterator(); + } + return delegate; + } + + @Override + public boolean seekExact(BytesRef term) throws IOException { + if (mayContainTerm(term)) { + return getDelegate().seekExact(term); + } else { + return false; + } + } + + @Override + public void seekExact(BytesRef term, TermState state) throws IOException { + getDelegate().seekExact(term, state); + } + + @Override + public TermState termState() throws IOException { + // TODO: return TermState that includes BloomFilter and fix _disk_usage API + return getDelegate().termState(); + } + }; + } + } + + private abstract static class LazyFilterTermsEnum extends BaseTermsEnum { + abstract TermsEnum getDelegate() throws IOException; + + @Override + public SeekStatus seekCeil(BytesRef text) throws IOException { + return getDelegate().seekCeil(text); + } + + @Override + public void seekExact(long ord) throws IOException { + getDelegate().seekExact(ord); + } + + @Override + public BytesRef term() throws IOException { + return getDelegate().term(); + } + + @Override + public long ord() throws IOException { + return getDelegate().ord(); + } + + @Override + public int docFreq() throws IOException { + return getDelegate().docFreq(); + } + + @Override + public long totalTermFreq() throws IOException { + return getDelegate().totalTermFreq(); + } + + @Override + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return getDelegate().postings(reuse, flags); + } + + @Override + public ImpactsEnum impacts(int flags) throws IOException { + return getDelegate().impacts(flags); + } + + @Override + public BytesRef next() throws IOException { + return getDelegate().next(); + } + + @Override + public AttributeSource attributes() { + try { + return getDelegate().attributes(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + + static int bloomFilterSize(int maxDocs) { + // 10% saturation (i.e., 10 bits for each term) + final long numBits = maxDocs * 10L; + if (numBits > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } else { + return (int) numBits; + } + } + + static int numBytesForBloomFilter(int bloomFilterSize) { + return Math.toIntExact((bloomFilterSize + 7L) / 8L); + } + + static int hashTerm(BytesRef br) { + final int hash = murmurhash3_x86_32(br.bytes, br.offset, br.length, 0x9747b28c); + return hash & 0x7FFF_FFFF; + } + + /** + * Forked from Lucene's StringHelper#murmurhash3_x86_32 so that changes to the Lucene implementation + * do not 
break the compatibility of this format. + */ + @SuppressWarnings("fallthrough") + private static int murmurhash3_x86_32(byte[] data, int offset, int len, int seed) { + final int c1 = 0xcc9e2d51; + final int c2 = 0x1b873593; + + int h1 = seed; + int roundedEnd = offset + (len & 0xfffffffc); // round down to 4 byte block + + for (int i = offset; i < roundedEnd; i += 4) { + // little endian load order + int k1 = (int) BitUtil.VH_LE_INT.get(data, i); + k1 *= c1; + k1 = Integer.rotateLeft(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = Integer.rotateLeft(h1, 13); + h1 = h1 * 5 + 0xe6546b64; + } + + // tail + int k1 = 0; + + switch (len & 0x03) { + case 3: + k1 = (data[roundedEnd + 2] & 0xff) << 16; + // fallthrough + case 2: + k1 |= (data[roundedEnd + 1] & 0xff) << 8; + // fallthrough + case 1: + k1 |= (data[roundedEnd] & 0xff); + k1 *= c1; + k1 = Integer.rotateLeft(k1, 15); + k1 *= c2; + h1 ^= k1; + } + + // finalization + h1 ^= len; + + // fmix(h1); + h1 ^= h1 >>> 16; + h1 *= 0x85ebca6b; + h1 ^= h1 >>> 13; + h1 *= 0xc2b2ae35; + h1 ^= h1 >>> 16; + + return h1; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 14d5efd0bb872..cb24bd2b9c6e1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -313,7 +313,7 @@ public IndexShard( assert shardRouting.initializing(); this.shardRouting = shardRouting; final Settings settings = indexSettings.getSettings(); - this.codecService = new CodecService(mapperService); + this.codecService = new CodecService(mapperService, bigArrays); this.warmer = warmer; this.similarityService = similarityService; Objects.requireNonNull(store, "Store must be provided to the index shard"); diff --git a/server/src/main/java/org/elasticsearch/index/store/LuceneFilesExtensions.java b/server/src/main/java/org/elasticsearch/index/store/LuceneFilesExtensions.java index c9abf0d13cd14..f05236df59113 100644 --- a/server/src/main/java/org/elasticsearch/index/store/LuceneFilesExtensions.java +++ b/server/src/main/java/org/elasticsearch/index/store/LuceneFilesExtensions.java @@ -18,6 +18,9 @@ public enum LuceneFilesExtensions { + // Elasticsearch BloomFilterPostingsFormat + BFI("bfi", "BloomFilter Index", false, true), + BFM("bfm", "BloomFilter Metadata", true, false), CFE("cfe", "Compound Files Entries", true, false), // Compound files are tricky because they store all the information for the segment. Benchmarks // suggested that not mapping them hurts performance. 
diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat new file mode 100644 index 0000000000000..91890d721b27f --- /dev/null +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat @@ -0,0 +1 @@ +org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index d98b2954f45f1..7755dd140daf9 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -19,6 +19,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -93,7 +94,7 @@ private CodecService createCodecService() throws IOException { settings.getMode().idFieldMapperWithoutFieldData(), ScriptCompiler.NONE ); - return new CodecService(service); + return new CodecService(service, BigArrays.NON_RECYCLING_INSTANCE); } } diff --git a/server/src/test/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormatTests.java new file mode 100644 index 0000000000000..6c3af18800741 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormatTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.codec.bloomfilter; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.GraalVMThreadsFilter; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@ThreadLeakFilters(filters = { GraalVMThreadsFilter.class }) +public class ES85BloomFilterPostingsFormatTests extends BasePostingsFormatTestCase { + + @Override + protected Codec getCodec() { + return TestUtil.alwaysPostingsFormat(new ES85BloomFilterPostingsFormat(BigArrays.NON_RECYCLING_INSTANCE, field -> { + PostingsFormat postingsFormat = Codec.getDefault().postingsFormat(); + if (postingsFormat instanceof PerFieldPostingsFormat) { + postingsFormat = TestUtil.getDefaultPostingsFormat(); + } + return postingsFormat; + })); + } + + public void testBloomFilterSize() { + assertThat(ES85BloomFilterPostingsFormat.bloomFilterSize(1000), equalTo(10_000)); + assertThat(ES85BloomFilterPostingsFormat.bloomFilterSize(IndexWriter.MAX_DOCS - random().nextInt(10)), equalTo(Integer.MAX_VALUE)); + assertThat(ES85BloomFilterPostingsFormat.numBytesForBloomFilter(16384), equalTo(2048)); + assertThat(ES85BloomFilterPostingsFormat.numBytesForBloomFilter(16383), equalTo(2048)); + assertThat(ES85BloomFilterPostingsFormat.numBytesForBloomFilter(Integer.MAX_VALUE), equalTo(1 << 28)); + } + + public void testHashTerms() { + Map testStrings = Map.of( + "hello", + 1568626408, + "elasticsearch", + 1410942402, + "elastic", + 255526858, + "java", + 684588044, + "lucene", + 881308315, + "bloom_filter", + 83797118, + "", + 1807139368 + ); + for (Map.Entry e : testStrings.entrySet()) { + String term = e.getKey(); + BytesRef byteRef = randomBytesRef(term.getBytes(StandardCharsets.UTF_8)); + int hash = ES85BloomFilterPostingsFormat.hashTerm(byteRef); + assertThat("term=" + term, hash, equalTo(e.getValue())); + } + + Map testBytes = Map.of( + new byte[] { 126, 49, -19, -128, 4, -77, 114, -61, 104, -58, -35, 113, 107 }, + 1155258673, + new byte[] { -50, 83, -18, 81, -44, -75, -77, 124, -76, 62, -16, 99, 75, -55, 119 }, + 973344634, + new byte[] { 110, -26, 71, -17, -113, -83, 58, 31, 13, -32, 38, -61, -97, -104, -9, -38 }, + 1950254802, + new byte[] { -20, 20, -88, 12, 5, -38, -50, 33, -21, -13, 90, 37, 28, -35, 107, 93, 30, -32, -76, 38 }, + 1123005351, + new byte[] { 88, -112, -11, -59, -103, 5, -107, -56, 14, 31, 2, -5, 67, -108, -125, 42, 28 }, + 1411536425, + new byte[] { 114, 82, -59, -103, 0, 7, -77 }, + 1883229848, + new byte[] { 34, 91, -26, 90, 21, -64, -72, 0, 101, -12, -33, 27, 119, 77, -13, 39, -60, -53 }, + 603518683, + new byte[] { 3, -68, -103, -125, 74, 122, -64, -19 }, + 84707471, + new byte[] { 0 }, + 691257000, + new byte[] { 1 }, + 955192589 + ); + for (Map.Entry e : testBytes.entrySet()) { + byte[] term = e.getKey(); + final BytesRef bytesRef = randomBytesRef(term); + int hash = ES85BloomFilterPostingsFormat.hashTerm(bytesRef); + 
assertThat("term=" + Arrays.toString(term), hash, equalTo(e.getValue())); + } + + byte[] bytes = ESTestCase.randomByteArrayOfLength(ESTestCase.between(0, 1000)); + assertThat(ES85BloomFilterPostingsFormat.hashTerm(randomBytesRef(bytes)), greaterThanOrEqualTo(0)); + } + + private static BytesRef randomBytesRef(byte[] bytes) { + if (random().nextBoolean()) { + final BytesRefBuilder builder = new BytesRefBuilder(); + // prefix + int offset = ESTestCase.randomIntBetween(0, 10); + builder.append(new BytesRef(ESTestCase.randomByteArrayOfLength(offset))); + // term + builder.append(bytes, 0, bytes.length); + // suffix + int suffixLength = ESTestCase.between(0, 10); + builder.append(new BytesRef(ESTestCase.randomByteArrayOfLength(suffixLength))); + return new BytesRef(builder.bytes(), offset, bytes.length); + } else { + return new BytesRef(bytes); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index aefb2cd06ba79..15d982b3dd030 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -3122,7 +3122,7 @@ public void testFailStart() throws IOException { } public void testSettings() { - CodecService codecService = new CodecService(null); + CodecService codecService = newCodecService(); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); @@ -3560,7 +3560,7 @@ public void testRecoverFromForeignTranslog() throws IOException { newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + newCodecService(), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), @@ -7228,7 +7228,7 @@ public void testNotWarmUpSearcherInEngineCtor() throws Exception { config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + newCodecService(), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index eb987a30f966e..86275be900c8d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerScope; @@ -136,7 +137,7 @@ protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) { public void testPostingsFormat() throws IOException { MapperService mapperService = createMapperService(fieldMapping(this::minimalMapping)); - CodecService codecService = new CodecService(mapperService); + CodecService codecService = new CodecService(mapperService, BigArrays.NON_RECYCLING_INSTANCE); Codec codec = codecService.codec("default"); assertThat(codec, instanceOf(PerFieldMapperCodec.class)); PerFieldMapperCodec perFieldCodec = (PerFieldMapperCodec) 
codec; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 4fa3071890070..3ef368785ae4e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -20,6 +20,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.codec.PerFieldMapperCodec; import org.elasticsearch.index.mapper.DocumentMapper; @@ -450,7 +451,7 @@ public void testKnnVectorsFormat() throws IOException { b.field("ef_construction", efConstruction); b.endObject(); })); - CodecService codecService = new CodecService(mapperService); + CodecService codecService = new CodecService(mapperService, BigArrays.NON_RECYCLING_INSTANCE); Codec codec = codecService.codec("default"); assertThat(codec, instanceOf(PerFieldMapperCodec.class)); KnnVectorsFormat knnVectorsFormat = ((PerFieldMapperCodec) codec).getKnnVectorsFormatForField("field"); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index fc494f546fecd..91f7d262cec7a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -4400,7 +4401,7 @@ public void testCloseShardWhileEngineIsWarming() throws Exception { config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index f4f6229acc8f4..98b38ce7258d7 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -137,7 +137,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), - new CodecService(null), + new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE), eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 561054d052bc6..158ae436c59f8 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -14,6 +14,7 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.InternalEngine; @@ -385,7 +386,7 @@ EngineConfig configWithRefreshListener(EngineConfig config, ReferenceManager.Ref config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index ab85a6c12a71f..aab0fed9ec3f8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -205,7 +205,7 @@ protected Settings indexSettings() { public void setUp() throws Exception { super.setUp(); primaryTerm.set(randomLongBetween(1, Long.MAX_VALUE)); - CodecService codecService = new CodecService(null); + CodecService codecService = newCodecService(); String name = Codec.getDefault().getName(); if (Arrays.asList(codecService.availableCodecs()).contains(name)) { // some codecs are read only so we only take the ones that we have in the service and randomly @@ -259,7 +259,7 @@ public EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSuppl config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + newCodecService(), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), @@ -287,7 +287,7 @@ public EngineConfig copy(EngineConfig config, Analyzer analyzer) { config.getMergePolicy(), analyzer, config.getSimilarity(), - new CodecService(null), + newCodecService(), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), @@ -315,7 +315,7 @@ public EngineConfig copy(EngineConfig config, MergePolicy mergePolicy) { mergePolicy, config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + newCodecService(), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), @@ -833,7 +833,7 @@ public EngineConfig config( mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), - new CodecService(null), + newCodecService(), eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), @@ -869,7 +869,7 @@ protected EngineConfig config(EngineConfig config, Store store, Path translogPat config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null), + newCodecService(), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), @@ -1614,4 +1614,8 @@ private static LazySoftDeletesDirectoryReaderWrapper.LazyBits lazyBits(LeafReade // hard fail - we can't get the lazybits throw new IllegalStateException("Can not extract lazy bits from given index reader [" + reader + "]"); } + + static CodecService newCodecService() { + return new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 7d52f28d32d6c..1e1381929e4b7 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -452,7 +452,9 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde RandomNumbers.randomIntBetween(random, 1, 15) + "ms" ); } - + if (randomBoolean()) { + builder.put(IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING.getKey(), randomBoolean()); + } return builder; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 2f65b68e7311d..a355afde36b52 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -509,7 +509,8 @@ static String[] extractLeaderShardHistoryUUIDs(Map ccrIndexMetad MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, EngineConfig.INDEX_CODEC_SETTING, - DataTier.TIER_PREFERENCE_SETTING + DataTier.TIER_PREFERENCE_SETTING, + IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING ); public static Settings filter(Settings originalSettings) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 446ff71171031..72b9ed68c1a2f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -267,7 +267,7 @@ private EngineConfig engineConfig( newMergePolicy(), indexWriterConfig.getAnalyzer(), indexWriterConfig.getSimilarity(), - new CodecService(null), + new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE), new Engine.EventListener() { @Override public void onFailedEngine(String reason, Exception e) { From 24e367fe0fbf3da9c910c34ccba6177f8bbc53a3 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 8 Aug 2022 08:38:48 -0700 Subject: [PATCH 133/265] Add support for source fallback with the boolean field type (#89052) This change adds a SourceValueFetcherSortedBooleanIndexFieldData to support boolean doc values for source fallback. 
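The fallback emulates SortedNumericDocValues on top of values re-read from _source: for each document the fetched booleans are counted and then replayed in sorted numeric order, all false values as 0 before all true values as 1. A small standalone sketch of that ordering contract, using plain collections rather than the new fielddata classes (class and variable names here are illustrative only):

    import java.util.Arrays;
    import java.util.List;

    class BooleanSourceFallbackSketch {
        public static void main(String[] args) {
            List<Boolean> fetchedFromSource = List.of(true, false, true); // one document's _source values
            int falseCount = 0;
            int trueCount = 0;
            for (boolean b : fetchedFromSource) {
                if (b) {
                    trueCount++;
                } else {
                    falseCount++;
                }
            }
            // docValueCount() is trueCount + falseCount; nextValue() yields the 0s first, then the 1s
            long[] replayed = new long[trueCount + falseCount];
            for (int i = 0; i < replayed.length; i++) {
                replayed[i] = i < falseCount ? 0L : 1L;
            }
            System.out.println(Arrays.toString(replayed)); // prints [0, 1, 1]
        }
    }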
--- docs/changelog/89052.yaml | 5 + .../test/painless/50_script_doc_values.yml | 119 ++++++++++++++ ...lueFetcherSortedBooleanIndexFieldData.java | 154 ++++++++++++++++++ .../index/mapper/BooleanFieldMapper.java | 35 +++- .../script/field/BooleanDocValuesField.java | 2 +- 5 files changed, 311 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/89052.yaml create mode 100644 server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java diff --git a/docs/changelog/89052.yaml b/docs/changelog/89052.yaml new file mode 100644 index 0000000000000..88674956ca729 --- /dev/null +++ b/docs/changelog/89052.yaml @@ -0,0 +1,5 @@ +pr: 89052 +summary: Add support for source fallback with the boolean field type +area: Mapping +type: enhancement +issues: [] diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 8167de737fcb3..85b5dc4680b6e 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -9,6 +9,9 @@ setup: properties: boolean: type: boolean + boolean_no_doc_values: + type: boolean + doc_values: false date: type: date nanos: @@ -79,6 +82,7 @@ setup: body: rank: 1 boolean: true + boolean_no_doc_values: true date: 2017-01-01T12:11:12 nanos: 2015-01-01T12:10:30.123456789Z geo_point: 41.12,-71.34 @@ -117,6 +121,7 @@ setup: body: rank: 3 boolean: [true, false, true] + boolean_no_doc_values: [true, false, true] ip: ["10.1.2.3", "2001:db8::2:1"] date: [2017-01-01T12:11:12, 2018-01-01T12:11:12] nanos: [2015-01-01T12:10:30.123456789Z, 2015-01-01T12:10:30.987654321Z] @@ -256,6 +261,120 @@ setup: source: "field('boolean').size()" - match: { hits.hits.0.fields.field.0: 0 } +--- +"boolean_no_doc_values": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['boolean_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['boolean_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "field('boolean_no_doc_values').get(false)" + - match: { hits.hits.0.fields.field.0: true } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "/* avoid yaml stash for '$' */ $('boolean_no_doc_values', false)" + - match: { hits.hits.0.fields.field.0: true } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "field('boolean_no_doc_values').get(false)" + - match: { hits.hits.0.fields.field.0: false } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "/* avoid yaml stash for '$' */ $('boolean_no_doc_values', false)" + - match: { hits.hits.0.fields.field.0: false } + + - do: + search: 
+ rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "field('boolean_no_doc_values').get(1, false)" + - match: { hits.hits.0.fields.field.0: false } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "int total = 0; for (boolean b : field('boolean_no_doc_values')) { total += b ? 1 : 0; } total;" + - match: { hits.hits.0.fields.field.0: 1 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "3" } } + script_fields: + field: + script: + source: "int total = 0; for (boolean b : field('boolean_no_doc_values')) { total += b ? 1 : 0; } total + field('boolean').size();" + - match: { hits.hits.0.fields.field.0: 5 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "field('boolean_no_doc_values').size()" + - match: { hits.hits.0.fields.field.0: 0 } + --- "date": - skip: diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java new file mode 100644 index 0000000000000..3913cdb4042ae --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.fielddata; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.elasticsearch.index.mapper.ValueFetcher; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.script.field.DocValuesScriptFieldFactory; +import org.elasticsearch.script.field.ToScriptFieldFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.Collections; + +public class SourceValueFetcherSortedBooleanIndexFieldData extends SourceValueFetcherIndexFieldData { + + public static class Builder extends SourceValueFetcherIndexFieldData.Builder { + + public Builder( + String fieldName, + ValuesSourceType valuesSourceType, + ValueFetcher valueFetcher, + SourceLookup sourceLookup, + ToScriptFieldFactory toScriptFieldFactory + ) { + super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + } + + @Override + public SourceValueFetcherSortedBooleanIndexFieldData build(IndexFieldDataCache cache, CircuitBreakerService breakerService) { + return new SourceValueFetcherSortedBooleanIndexFieldData( + fieldName, + valuesSourceType, + valueFetcher, + sourceLookup, + toScriptFieldFactory + ); + } + } + + protected SourceValueFetcherSortedBooleanIndexFieldData( + String fieldName, + ValuesSourceType valuesSourceType, + ValueFetcher valueFetcher, + SourceLookup sourceLookup, + ToScriptFieldFactory toScriptFieldFactory + ) { + super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + } + + @Override + public SourceValueFetcherLeafFieldData loadDirect(LeafReaderContext context) throws Exception { + return new SourceValueFetcherSortedBooleanLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + } + + private static class SourceValueFetcherSortedBooleanLeafFieldData extends SourceValueFetcherLeafFieldData { + + private SourceValueFetcherSortedBooleanLeafFieldData( + ToScriptFieldFactory toScriptFieldFactory, + LeafReaderContext leafReaderContext, + ValueFetcher valueFetcher, + SourceLookup sourceLookup + ) { + super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + } + + @Override + public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { + return toScriptFieldFactory.getScriptFieldFactory( + new SourceValueFetcherSortedBooleanDocValues(leafReaderContext, valueFetcher, sourceLookup), + name + ); + } + } + + private static class SourceValueFetcherSortedBooleanDocValues extends SortedNumericDocValues implements ValueFetcherDocValues { + + private final LeafReaderContext leafReaderContext; + + private final ValueFetcher valueFetcher; + private final SourceLookup sourceLookup; + + private int trueCount; + private int falseCount; + private int iteratorIndex; + + private SourceValueFetcherSortedBooleanDocValues( + LeafReaderContext leafReaderContext, + ValueFetcher valueFetcher, + SourceLookup sourceLookup + ) { + this.leafReaderContext = leafReaderContext; + this.valueFetcher = valueFetcher; + this.sourceLookup = sourceLookup; + } + + @Override + public boolean advanceExact(int doc) throws IOException { + sourceLookup.setSegmentAndDocument(leafReaderContext, doc); + + for (Object value : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { + assert value instanceof Boolean; + if ((Boolean) value) { + ++trueCount; + } else { + ++falseCount; + } + } + + 
iteratorIndex = 0; + + return true; + } + + @Override + public int docValueCount() { + return trueCount + falseCount; + } + + @Override + public long nextValue() throws IOException { + assert iteratorIndex < trueCount + falseCount; + return iteratorIndex++ < falseCount ? 0L : 1L; + } + + @Override + public int docID() { + throw new UnsupportedOperationException("not supported for source fallback"); + } + + @Override + public int nextDoc() throws IOException { + throw new UnsupportedOperationException("not supported for source fallback"); + } + + @Override + public int advance(int target) throws IOException { + throw new UnsupportedOperationException("not supported for source fallback"); + } + + @Override + public long cost() { + throw new UnsupportedOperationException("not supported for source fallback"); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 9927bbae30ff5..8f085195dae0c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.SourceValueFetcherSortedBooleanIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.BooleanFieldScript; @@ -37,6 +38,7 @@ import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.script.field.BooleanDocValuesField; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.lookup.FieldValues; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentBuilder; @@ -48,6 +50,7 @@ import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.Set; /** * A field mapper for boolean fields. @@ -198,7 +201,11 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) if (this.scriptValues != null) { return FieldValues.valueFetcher(this.scriptValues, context); } - return new SourceValueFetcher(name(), context, nullValue) { + return sourceValueFetcher(context.isSourceEnabled() ? 
context.sourcePath(name()) : Collections.emptySet()); + } + + private SourceValueFetcher sourceValueFetcher(Set sourcePaths) { + return new SourceValueFetcher(sourcePaths, nullValue) { @Override protected Boolean parseSourceValue(Object value) { if (value instanceof Boolean) { @@ -255,8 +262,30 @@ public Boolean valueForDisplay(Object value) { @Override public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { - failIfNoDocValues(); - return new SortedNumericIndexFieldData.Builder(name(), NumericType.BOOLEAN, BooleanDocValuesField::new); + FielddataOperation operation = fieldDataContext.fielddataOperation(); + + if (operation == FielddataOperation.SEARCH) { + failIfNoDocValues(); + } + + if ((operation == FielddataOperation.SEARCH || operation == FielddataOperation.SCRIPT) && hasDocValues()) { + return new SortedNumericIndexFieldData.Builder(name(), NumericType.BOOLEAN, BooleanDocValuesField::new); + } + + if (operation == FielddataOperation.SCRIPT) { + SearchLookup searchLookup = fieldDataContext.lookupSupplier().get(); + Set sourcePaths = fieldDataContext.sourcePathsLookup().apply(name()); + + return new SourceValueFetcherSortedBooleanIndexFieldData.Builder( + name(), + CoreValuesSourceType.BOOLEAN, + sourceValueFetcher(sourcePaths), + searchLookup.source(), + BooleanDocValuesField::new + ); + } + + throw new IllegalStateException("unknown field data type [" + operation.name() + "]"); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java index 6e39598c0349e..ab2c6af34db5e 100644 --- a/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java @@ -43,7 +43,7 @@ public void setNextDocId(int docId) throws IOException { if (input.advanceExact(docId)) { resize(input.docValueCount()); for (int i = 0; i < count; i++) { - values[i] = input.nextValue() == 1; + values[i] = input.nextValue() == 1L; } } else { resize(0); From 81265d2c2a16dd651894d8151ae0045bf4f64371 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 8 Aug 2022 08:39:13 -0700 Subject: [PATCH 134/265] Add support for source fallback with scaled float field type (#89053) This change adds source fallback support for scaled float. This uses the already existing class SourceValueFetcherSortedDoubleIndexFieldData. 
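For context, a scaled_float is internally multiplied by the scaling factor and rounded to the closest long (see the mapper javadoc in the diff below), so with the scaling_factor of 100 used in the YAML test a stored 3.14 round-trips exactly while extra decimal places fall away. A tiny standalone illustration of that encoding (plain Java with illustrative names, not the mapper code):

    class ScaledFloatSketch {
        public static void main(String[] args) {
            double scalingFactor = 100.0;                      // matches scaling_factor: 100 in the test mapping
            double input = 3.14159;
            long scaled = Math.round(input * scalingFactor);   // 314, the long actually stored
            double decoded = scaled / scalingFactor;           // 3.14, the value at scaled_float granularity
            System.out.println(scaled + " -> " + decoded);
        }
    }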
--- docs/changelog/89053.yaml | 5 ++ .../test/painless/50_script_doc_values.yml | 58 +++++++++++++++++++ .../mapper/extras/ScaledFloatFieldMapper.java | 48 ++++++++++++--- 3 files changed, 102 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/89053.yaml diff --git a/docs/changelog/89053.yaml b/docs/changelog/89053.yaml new file mode 100644 index 0000000000000..e8ee495cc0db1 --- /dev/null +++ b/docs/changelog/89053.yaml @@ -0,0 +1,5 @@ +pr: 89053 +summary: Add support for source fallback with scaled float field type +area: Mapping +type: enhancement +issues: [] diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 85b5dc4680b6e..dd2187673134a 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -66,6 +66,10 @@ setup: scaled_float: type: scaled_float scaling_factor: 100 + scaled_float_no_doc_values: + type: scaled_float + scaling_factor: 100 + doc_values: false token_count: type: token_count analyzer: standard @@ -105,6 +109,7 @@ setup: half_float: 3.140625 half_float_no_doc_values: 3.140625 scaled_float: 3.14 + scaled_float_no_doc_values: 3.14 token_count: count all these words please - do: @@ -144,6 +149,7 @@ setup: half_float: [1.123, 2.234] half_float_no_doc_values: [2.234, 1.123] scaled_float: [-3.5, 2.5] + scaled_float_no_doc_values: [2.5, -3.5] - do: @@ -2605,6 +2611,58 @@ setup: source: "doc['scaled_float'].value" - match: { hits.hits.0.fields.field.0: 3.14 } +--- +"scaled_float_no_doc_values": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['scaled_float_no_doc_values'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['scaled_float_no_doc_values'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid stash */ $('scaled_float_no_doc_values', 0.0)" + - match: { hits.hits.0.fields.field.0: 3.14 } + - match: { hits.hits.1.fields.field.0: 0.0 } + - match: { hits.hits.2.fields.field.0: -3.5 } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('scaled_float_no_doc_values').get(1, 0.0)" + - match: { hits.hits.0.fields.field.0: 0.0 } + - match: { hits.hits.1.fields.field.0: 0.0 } + - match: { hits.hits.2.fields.field.0: 2.5 } + --- "token_count": - do: diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index 40dc7ebd390af..84c51fe0ab6c1 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -27,6 +27,7 @@ import 
org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.fielddata.SourceValueFetcherSortedDoubleIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; @@ -43,7 +44,9 @@ import org.elasticsearch.script.field.ScaledFloatDocValuesField; import org.elasticsearch.script.field.ToScriptFieldFactory; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser.Token; @@ -57,6 +60,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; /** A {@link FieldMapper} for scaled floats. Values are internally multiplied * by a scaling factor and rounded to the closest long. */ @@ -276,15 +280,37 @@ public Query rangeQuery( @Override public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { - failIfNoDocValues(); - return (cache, breakerService) -> { - final IndexNumericFieldData scaledValues = new SortedNumericIndexFieldData.Builder( + FielddataOperation operation = fieldDataContext.fielddataOperation(); + + if (operation == FielddataOperation.SEARCH) { + failIfNoDocValues(); + } + + if ((operation == FielddataOperation.SEARCH || operation == FielddataOperation.SCRIPT) && hasDocValues()) { + return (cache, breakerService) -> { + final IndexNumericFieldData scaledValues = new SortedNumericIndexFieldData.Builder( + name(), + IndexNumericFieldData.NumericType.LONG, + (dv, n) -> { throw new UnsupportedOperationException(); } + ).build(cache, breakerService); + return new ScaledFloatIndexFieldData(scaledValues, scalingFactor, ScaledFloatDocValuesField::new); + }; + } + + if (operation == FielddataOperation.SCRIPT) { + SearchLookup searchLookup = fieldDataContext.lookupSupplier().get(); + Set sourcePaths = fieldDataContext.sourcePathsLookup().apply(name()); + + return new SourceValueFetcherSortedDoubleIndexFieldData.Builder( name(), - IndexNumericFieldData.NumericType.LONG, - (dv, n) -> { throw new UnsupportedOperationException(); } - ).build(cache, breakerService); - return new ScaledFloatIndexFieldData(scaledValues, scalingFactor, ScaledFloatDocValuesField::new); - }; + CoreValuesSourceType.NUMERIC, + sourceValueFetcher(sourcePaths), + searchLookup.source(), + ScaledFloatDocValuesField::new + ); + } + + throw new IllegalStateException("unknown field data type [" + operation.name() + "]"); } @Override @@ -292,7 +318,11 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), context) { + return sourceValueFetcher(context.isSourceEnabled() ? 
context.sourcePath(name()) : Collections.emptySet()); + } + + private SourceValueFetcher sourceValueFetcher(Set sourcePaths) { + return new SourceValueFetcher(sourcePaths, nullValue) { @Override protected Double parseSourceValue(Object value) { double doubleValue; From ac25477e40679e07d0059f44afb9be17c3eba8f4 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Mon, 8 Aug 2022 17:06:07 +0100 Subject: [PATCH 135/265] Quote paths with whitespace in Windows service CLIs (#89072) --- .../windows/service/ProcrunCommand.java | 7 +++- .../service/WindowsServiceInstallCommand.java | 15 ++++++--- .../windows/service/ProcrunCommandTests.java | 4 +++ .../service/WindowsServiceCliTestCase.java | 33 +++++++++++++++---- .../WindowsServiceInstallCommandTests.java | 14 +++++--- .../WindowsServiceManagerCommandTests.java | 7 +++- .../WindowsServiceRemoveCommandTests.java | 5 +++ .../WindowsServiceStartCommandTests.java | 5 +++ .../WindowsServiceStopCommandTests.java | 5 +++ docs/changelog/89072.yaml | 6 ++++ .../elasticsearch/cli/CommandTestCase.java | 17 +++++++++- 11 files changed, 100 insertions(+), 18 deletions(-) create mode 100644 docs/changelog/89072.yaml diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java index c10495d3b8af6..b507e5e43a456 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java @@ -67,7 +67,7 @@ protected void execute(Terminal terminal, OptionSet options, ProcessInfo process preExecute(terminal, processInfo, serviceId); List procrunCmd = new ArrayList<>(); - procrunCmd.add(procrun.toString()); + procrunCmd.add(quote(procrun.toString())); procrunCmd.add("//%s/%s".formatted(cmd, serviceId)); if (includeLogArgs()) { procrunCmd.add(getLogArgs(serviceId, processInfo.workingDir(), processInfo.envVars())); @@ -86,6 +86,11 @@ protected void execute(Terminal terminal, OptionSet options, ProcessInfo process } } + /** Quotes the given String. 
*/ + static String quote(String s) { + return '"' + s + '"'; + } + /** Determines the service id for the Elasticsearch service that should be used */ private String getServiceId(OptionSet options, Map env) throws UserException { List args = options.nonOptionArguments(); diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java index 4e6e2cddfeb93..0d0bd040db30a 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java @@ -42,7 +42,7 @@ protected String getAdditionalArgs(String serviceId, ProcessInfo pinfo) { addArg(args, "--Classpath", pinfo.sysprops().get("java.class.path")); addArg(args, "--JvmMs", "4m"); addArg(args, "--JvmMx", "64m"); - addArg(args, "--JvmOptions", getJvmOptions(pinfo.sysprops())); + addQuotedArg(args, "--JvmOptions", getJvmOptions(pinfo.sysprops())); addArg(args, "--PidFile", "%s.pid".formatted(serviceId)); addArg( args, @@ -55,10 +55,10 @@ protected String getAdditionalArgs(String serviceId, ProcessInfo pinfo) { pinfo.envVars() .getOrDefault("SERVICE_DESCRIPTION", "Elasticsearch %s Windows Service - https://elastic.co".formatted(Version.CURRENT)) ); - addArg(args, "--Jvm", getJvmDll(getJavaHome(pinfo.sysprops())).toString()); + addQuotedArg(args, "--Jvm", quote(getJvmDll(getJavaHome(pinfo.sysprops())).toString())); addArg(args, "--StartMode", "jvm"); addArg(args, "--StopMode", "jvm"); - addArg(args, "--StartPath", pinfo.workingDir().toString()); + addQuotedArg(args, "--StartPath", quote(pinfo.workingDir().toString())); addArg(args, "++JvmOptions", "-Dcli.name=windows-service-daemon"); addArg(args, "++JvmOptions", "-Dcli.libs=lib/tools/server-cli,lib/tools/windows-service-cli"); addArg(args, "++Environment", "HOSTNAME=%s".formatted(pinfo.envVars().get("COMPUTERNAME"))); @@ -89,6 +89,13 @@ private static void addArg(List args, String arg, String value) { args.add(value); } + // Adds an arg with an already appropriately quoted value. Trivial, but explicit implementation. 
+ // This method is typically used when adding args whose value contains a file-system path + private static void addQuotedArg(List args, String arg, String value) { + args.add(arg); + args.add(value); + } + @SuppressForbidden(reason = "get java home path to pass through") private static Path getJavaHome(Map sysprops) { return Paths.get(sysprops.get("java.home")); @@ -107,7 +114,7 @@ private static String getJvmOptions(Map sysprops) { jvmOptions.add("-XX:+UseSerialGC"); // passthrough these properties for (var prop : List.of("es.path.home", "es.path.conf", "es.distribution.type")) { - jvmOptions.add("-D%s=%s".formatted(prop, sysprops.get(prop))); + jvmOptions.add("-D%s=%s".formatted(prop, quote(sysprops.get(prop)))); } return String.join(";", jvmOptions); } diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java index b683884a37571..e4b651fcb77af 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java @@ -25,6 +25,10 @@ public class ProcrunCommandTests extends WindowsServiceCliTestCase { + public ProcrunCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + PreExecuteHook preExecuteHook; boolean includeLogArgs; String additionalArgs; diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java index b727774ea2d1d..808173005b96f 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java @@ -8,6 +8,8 @@ package org.elasticsearch.windows.service; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.cli.CommandTestCase; import org.junit.Before; @@ -47,6 +49,15 @@ public abstract class WindowsServiceCliTestCase extends CommandTestCase { int mockProcessExit = 0; ProcessValidator mockProcessValidator = null; + @ParametersFactory + public static Iterable spaceInPathProvider() { + return List.of(new Object[] { true }, new Object[] { false }); + } + + protected WindowsServiceCliTestCase(boolean spaceInPath) { + super(spaceInPath); + } + interface ProcessValidator { void validate(Map env, ProcrunCall procrunCall); } @@ -106,16 +117,22 @@ protected Process mockProcess(ProcessBuilder processBuilder) throws IOException private static final Pattern commandPattern = Pattern.compile("//([A-Z]{2})/([\\w-]+)"); private static ProcrunCall parseProcrunCall(String unparsedArgs) { + // command/exe is quoted + assert unparsedArgs.charAt(0) == '"'; + int idx = unparsedArgs.indexOf('"', 1); + String exe = unparsedArgs.substring(0, idx + 1); + // Strip the leading command/exe from the args + unparsedArgs = unparsedArgs.substring(idx + 1).stripLeading(); + String[] splitArgs = unparsedArgs.split(" "); - assertThat(unparsedArgs, splitArgs.length, greaterThanOrEqualTo(2)); + assertThat(unparsedArgs, splitArgs.length, greaterThanOrEqualTo(1)); Map> args = new HashMap<>(); - String exe = splitArgs[0]; - Matcher 
commandMatcher = commandPattern.matcher(splitArgs[1]); - assertThat(splitArgs[1], commandMatcher.matches(), is(true)); + Matcher commandMatcher = commandPattern.matcher(splitArgs[0]); + assertThat(splitArgs[0], commandMatcher.matches(), is(true)); String command = commandMatcher.group(1); String serviceId = commandMatcher.group(2); - int i = 2; + int i = 1; while (i < splitArgs.length) { String arg = splitArgs[i]; assertThat("procrun args begin with -- or ++", arg, anyOf(startsWith("--"), startsWith("++"))); @@ -165,8 +182,12 @@ public void resetMockProcess() throws Exception { protected abstract String getDefaultFailureMessage(); + static String quote(String s) { + return '"' + s + '"'; + } + protected String getExe() { - return serviceExe.toString(); + return quote(serviceExe.toString()); } protected boolean includeLogsArgs() { diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java index ffd0e16fd6f79..0db531074498f 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java @@ -31,6 +31,10 @@ public class WindowsServiceInstallCommandTests extends WindowsServiceCliTestCase Path jvmDll; + public WindowsServiceInstallCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Before public void setupJvm() throws Exception { jvmDll = javaHome.resolve("jre/bin/server/jvm.dll"); @@ -80,7 +84,7 @@ public void testAlternateDllLocation() throws Exception { } public void testDll() throws Exception { - assertServiceArgs(Map.of("Jvm", jvmDll.toString())); + assertServiceArgs(Map.of("Jvm", quote(jvmDll.toString()))); } public void testPreExecuteOutput() throws Exception { @@ -95,9 +99,9 @@ public void testJvmOptions() throws Exception { sysprops.put("es.distribution.type", "testdistro"); List expectedOptions = List.of( "" + "-XX:+UseSerialGC", - "-Des.path.home=" + esHomeDir.toString(), - "-Des.path.conf=" + esHomeDir.resolve("config").toString(), - "-Des.distribution.type=testdistro" + "-Des.path.home=" + quote(esHomeDir.toString()), + "-Des.path.conf=" + quote(esHomeDir.resolve("config").toString()), + "-Des.distribution.type=" + quote("testdistro") ); mockProcessValidator = (environment, procrunCall) -> { List options = procrunCall.args().get("JvmOptions"); @@ -136,7 +140,7 @@ public void testFixedArgs() throws Exception { entry("StopMode", "jvm"), entry("JvmMs", "4m"), entry("JvmMx", "64m"), - entry("StartPath", esHomeDir.toString()), + entry("StartPath", quote(esHomeDir.toString())), entry("Classpath", "javaclasspath") // dummy value for tests ) ); diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java index cd3aea949f0f6..1699dd3f78316 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class 
WindowsServiceManagerCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceManagerCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceManagerCommand() { @@ -25,7 +30,7 @@ Process startProcess(ProcessBuilder processBuilder) throws IOException { @Override protected String getExe() { - return mgrExe.toString(); + return quote(mgrExe.toString()); } @Override diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java index 3d2032d75a195..d0e72e9de5c66 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class WindowsServiceRemoveCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceRemoveCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceRemoveCommand() { diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java index 7a30540d53ba0..502008d22422f 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class WindowsServiceStartCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceStartCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceStartCommand() { diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java index f623c5d2465f3..a36e090bd7ac4 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class WindowsServiceStopCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceStopCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceStopCommand() { diff --git a/docs/changelog/89072.yaml b/docs/changelog/89072.yaml new file mode 100644 index 0000000000000..6647c892141d0 --- /dev/null +++ b/docs/changelog/89072.yaml @@ -0,0 +1,6 @@ +pr: 89072 +summary: Quote paths with whitespace in Windows service CLIs +area: Infra/CLI +type: bug +issues: + - 89043 diff --git a/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java index 
09e07f32d820a..6c3573e2594d3 100644 --- a/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java @@ -41,12 +41,27 @@ public abstract class CommandTestCase extends ESTestCase { /** The ES config dir */ protected Path configDir; + /** Whether to include a whitespace in the file-system path. */ + private final boolean spaceInPath; + + protected CommandTestCase() { + this(false); + } + + protected CommandTestCase(boolean spaceInPath) { + this.spaceInPath = spaceInPath; + } + @Before public void resetTerminal() throws IOException { terminal.reset(); terminal.setSupportsBinary(false); terminal.setVerbosity(Terminal.Verbosity.NORMAL); - esHomeDir = createTempDir(); + if (spaceInPath) { + esHomeDir = createTempDir("a b"); // contains a whitespace + } else { + esHomeDir = createTempDir(); + } configDir = esHomeDir.resolve("config"); Files.createDirectory(configDir); sysprops.clear(); From 2429dbc451f0018ff14bbaa36461a7b1f94375d5 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 8 Aug 2022 19:03:30 +0200 Subject: [PATCH 136/265] Dry up custom immutable Map.Entry implementations (#89153) Follow-up to #88815. No need to have two equivalent implementations here. --- .../elasticsearch/cluster/DiffableUtils.java | 26 ++--------- .../common/collect/ImmutableOpenMap.java | 45 ++----------------- .../org/elasticsearch/common/util/Maps.java | 37 +++++++++++++++ 3 files changed, 45 insertions(+), 63 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index bcb9222e384ae..fc23db6015fa3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.util.Maps; import java.io.IOException; import java.util.ArrayList; @@ -153,7 +154,7 @@ private static > MapDiff createDiff( inserts++; } else if (entry.getValue().equals(previousValue) == false) { if (valueSerializer.supportsDiffableValues()) { - diffs.add(mapEntry(entry.getKey(), valueSerializer.diff(entry.getValue(), previousValue))); + diffs.add(new Maps.ImmutableEntry<>(entry.getKey(), valueSerializer.diff(entry.getValue(), previousValue))); } else { upserts.add(entry); } @@ -307,14 +308,14 @@ private MapDiff( for (int i = 0; i < diffsCount; i++) { K key = keySerializer.readKey(in); Diff diff = valueSerializer.readDiff(in, key); - diffs.add(mapEntry(key, diff)); + diffs.add(new Maps.ImmutableEntry<>(key, diff)); } int upsertsCount = in.readVInt(); upserts = upsertsCount == 0 ? 
List.of() : new ArrayList<>(upsertsCount); for (int i = 0; i < upsertsCount; i++) { K key = keySerializer.readKey(in); T newValue = valueSerializer.read(in, key); - upserts.add(mapEntry(key, newValue)); + upserts.add(new Maps.ImmutableEntry<>(key, newValue)); } this.builderCtor = builderCtor; } @@ -402,25 +403,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - private static Map.Entry mapEntry(K key, T newValue) { - return new Map.Entry<>() { - @Override - public K getKey() { - return key; - } - - @Override - public T getValue() { - return newValue; - } - - @Override - public T setValue(T value) { - throw new UnsupportedOperationException(); - } - }; - } - /** * Provides read and write operations to serialize keys of map * @param type of key diff --git a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java index df5b57055bda9..895df8d34a96e 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java +++ b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java @@ -14,6 +14,8 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.procedures.ObjectObjectProcedure; +import org.elasticsearch.common.util.Maps; + import java.util.AbstractCollection; import java.util.AbstractMap; import java.util.AbstractSet; @@ -127,45 +129,6 @@ public Set> entrySet() { return (es = entrySet) == null ? (entrySet = new EntrySet<>(map)) : es; } - private static final class ImmutableEntry implements Map.Entry { - private final KType key; - private final VType value; - - ImmutableEntry(KType key, VType value) { - this.key = key; - this.value = value; - } - - @Override - public KType getKey() { - return key; - } - - @Override - public VType getValue() { - return value; - } - - @Override - public VType setValue(VType value) { - throw new UnsupportedOperationException("collection is immutable"); - } - - @Override - @SuppressWarnings("rawtypes") - public boolean equals(Object o) { - if (this == o) return true; - if ((o instanceof Map.Entry) == false) return false; - Map.Entry that = (Map.Entry) o; - return Objects.equals(key, that.getKey()) && Objects.equals(value, that.getValue()); - } - - @Override - public int hashCode() { - return Objects.hashCode(key) ^ Objects.hashCode(value); - } - } - private static final class ConversionIterator implements Iterator> { private final Iterator> original; @@ -185,7 +148,7 @@ public Map.Entry next() { if (obj == null) { return null; } - return new ImmutableEntry<>(obj.key, obj.value); + return new Maps.ImmutableEntry<>(obj.key, obj.value); } @Override @@ -244,7 +207,7 @@ public Spliterator> spliterator() { @Override public void forEach(Consumer> action) { map.forEach((Consumer>) ooCursor -> { - ImmutableEntry entry = new ImmutableEntry<>(ooCursor.key, ooCursor.value); + Maps.ImmutableEntry entry = new Maps.ImmutableEntry<>(ooCursor.key, ooCursor.value); action.accept(entry); }); } diff --git a/server/src/main/java/org/elasticsearch/common/util/Maps.java b/server/src/main/java/org/elasticsearch/common/util/Maps.java index 417b880414e7e..a0ff346da0d9c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/Maps.java +++ b/server/src/main/java/org/elasticsearch/common/util/Maps.java @@ -301,4 +301,41 @@ public static Map copyOf(Map source, Function copyValue } return copy; } + + /** + * An immutable implementation of {@link Map.Entry}. 
+ * @param key key + * @param value value + */ + public record ImmutableEntry (KType key, VType value) implements Map.Entry { + + @Override + public KType getKey() { + return key; + } + + @Override + public VType getValue() { + return value; + } + + @Override + public VType setValue(VType value) { + throw new UnsupportedOperationException(); + } + + @Override + @SuppressWarnings("rawtypes") + public boolean equals(Object o) { + if (this == o) return true; + if ((o instanceof Map.Entry) == false) return false; + Map.Entry that = (Map.Entry) o; + return Objects.equals(key, that.getKey()) && Objects.equals(value, that.getValue()); + } + + @Override + public int hashCode() { + return Objects.hashCode(key) ^ Objects.hashCode(value); + } + } } From f3659a64c9dceb41a8af93ece5312fcf009442d6 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 8 Aug 2022 19:18:29 +0200 Subject: [PATCH 137/265] Remove redundant and slow null token check from KeywordFieldMapper (#89168) No need to check for the null token manually and parse `textOrNull`. Either we can just use `text()` since we know we don't have to deal with a null token or use `textOrNull` and check the return value for `null`. I chose the latter because it benchmarked slightly faster in `BeatsMapperBenchmark`, but both save the expensive call to `currentToken` on the heavily nested x-content parser that we use here. --- .../index/mapper/KeywordFieldMapper.java | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 32bfda76d6d2c..8c4f5649f7915 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -66,7 +66,6 @@ import org.elasticsearch.search.runtime.StringScriptFieldTermQuery; import org.elasticsearch.search.runtime.StringScriptFieldWildcardQuery; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.io.UncheckedIOException; @@ -929,15 +928,8 @@ public KeywordFieldType fieldType() { @Override protected void parseCreateField(DocumentParserContext context) throws IOException { - String value; - XContentParser parser = context.parser(); - if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { - value = fieldType().nullValue; - } else { - value = parser.textOrNull(); - } - - indexValue(context, value); + final String value = context.parser().textOrNull(); + indexValue(context, value == null ?
fieldType().nullValue : value); } @Override From 398b0147a7117f54989d272f1ff510ec910fb5da Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 8 Aug 2022 12:34:58 -0700 Subject: [PATCH 138/265] Upgrade Gradle wrapper to 7.5.1 (#88918) --- build-tools-internal/gradle/wrapper/gradle-wrapper.properties | 4 ++-- build-tools-internal/src/main/resources/minimumGradleVersion | 2 +- gradle/wrapper/gradle-wrapper.properties | 4 ++-- plugins/examples/gradle/wrapper/gradle-wrapper.properties | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index b871071c412e2..e939ec976751d 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 +distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219 diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index 72906051c5c71..7501d508f743f 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -7.5 \ No newline at end of file +7.5.1 \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index b871071c412e2..e939ec976751d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 +distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219 diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index b871071c412e2..e939ec976751d 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 +distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219 From cdbd7ad5434b444b4d6c5256dca883a29f16a781 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 8 Aug 2022 15:32:08 -0700 Subject: [PATCH 139/265] Add publishing plugin to elasticsearch-grok project (#89184) It seems https://github.com/elastic/elasticsearch/pull/88982 introduced a dependency on `elasticsearch-grok` to `x-pack-core`. 
Since the latter is published to Maven Central, this means consumers will have issues resolving its dependencies since `elasticsearch-grok` isn't published. This pull request resolves this by adding the publishing plugin to the `grok` library. We'll then follow up separately to add that to our release configuration. --- libs/grok/build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 9b40935e2aa7b..b97ef133ba0a4 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -5,6 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ +apply plugin: 'elasticsearch.publish' dependencies { api 'org.jruby.joni:joni:2.1.29' From e6cfd9c263361c9c7dc03d521b74c347f23d2181 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 9 Aug 2022 12:12:05 +1000 Subject: [PATCH 140/265] Show assigned role descriptors in Get/QueryApiKey response (#89166) This PR adds a new `role_descriptors` field in the API key entity returned by both GetApiKey and QueryApiKey APIs. The field value is the map of the role descriptors that are assigned to an API key when creating or updating the key. If the key has no assigned role descriptors, i.e. it inherits the owner user's privileges, an empty object is returned in place. Relates: #89058 --- docs/changelog/89166.yaml | 5 + .../core/security/action/apikey/ApiKey.java | 42 ++- .../security/action/apikey/ApiKeyTests.java | 20 +- .../action/apikey/GetApiKeyResponseTests.java | 79 +++++- .../apikey/QueryApiKeyResponseTests.java | 27 +- .../security/authz/RoleDescriptorTests.java | 10 +- .../xpack/security/apikey/ApiKeyRestIT.java | 121 +++++++++ .../security/authc/ApiKeyIntegTests.java | 257 +++++++++++++----- .../xpack/security/authc/ApiKeyService.java | 52 ++-- .../security/authc/ApiKeyServiceTests.java | 2 +- .../apikey/RestGetApiKeyActionTests.java | 23 +- 11 files changed, 525 insertions(+), 113 deletions(-) create mode 100644 docs/changelog/89166.yaml rename x-pack/plugin/{security/src/test/java/org/elasticsearch/xpack => core/src/test/java/org/elasticsearch/xpack/core}/security/authz/RoleDescriptorTests.java (98%) diff --git a/docs/changelog/89166.yaml b/docs/changelog/89166.yaml new file mode 100644 index 0000000000000..e5d25756d70dc --- /dev/null +++ b/docs/changelog/89166.yaml @@ -0,0 +1,5 @@ +pr: 89166 +summary: Show assigned role descriptors in Get/QueryApiKey response +area: Security +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index f99db76868ca5..7fdf802b9976a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -17,9 +17,11 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; import java.time.Instant; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -39,6 +41,8 @@ public final class ApiKey implements ToXContentObject, Writeable { private final String username; private final String realm; private final Map metadata; + @Nullable + private final List roleDescriptors;
public ApiKey( String name, @@ -48,7 +52,8 @@ public ApiKey( boolean invalidated, String username, String realm, - @Nullable Map metadata + @Nullable Map metadata, + @Nullable List roleDescriptors ) { this.name = name; this.id = id; @@ -61,6 +66,7 @@ public ApiKey( this.username = username; this.realm = realm; this.metadata = metadata == null ? Map.of() : metadata; + this.roleDescriptors = roleDescriptors; } public ApiKey(StreamInput in) throws IOException { @@ -80,6 +86,12 @@ public ApiKey(StreamInput in) throws IOException { } else { this.metadata = Map.of(); } + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + final List roleDescriptors = in.readOptionalList(RoleDescriptor::new); + this.roleDescriptors = roleDescriptors != null ? List.copyOf(roleDescriptors) : null; + } else { + this.roleDescriptors = null; + } } public String getId() { @@ -114,6 +126,10 @@ public Map getMetadata() { return metadata; } + public List getRoleDescriptors() { + return roleDescriptors; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -130,6 +146,13 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t .field("username", username) .field("realm", realm) .field("metadata", (metadata == null ? Map.of() : metadata)); + if (roleDescriptors != null) { + builder.startObject("role_descriptors"); + for (var roleDescriptor : roleDescriptors) { + builder.field(roleDescriptor.getName(), roleDescriptor); + } + builder.endObject(); + } return builder; } @@ -149,11 +172,14 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_8_0_0)) { out.writeGenericMap(metadata); } + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeOptionalCollection(roleDescriptors); + } } @Override public int hashCode() { - return Objects.hash(name, id, creation, expiration, invalidated, username, realm, metadata); + return Objects.hash(name, id, creation, expiration, invalidated, username, realm, metadata, roleDescriptors); } @Override @@ -175,7 +201,8 @@ public boolean equals(Object obj) { && Objects.equals(invalidated, other.invalidated) && Objects.equals(username, other.username) && Objects.equals(realm, other.realm) - && Objects.equals(metadata, other.metadata); + && Objects.equals(metadata, other.metadata) + && Objects.equals(roleDescriptors, other.roleDescriptors); } @SuppressWarnings("unchecked") @@ -188,7 +215,8 @@ public boolean equals(Object obj) { (Boolean) args[4], (String) args[5], (String) args[6], - (args[7] == null) ? null : (Map) args[7] + (args[7] == null) ? 
null : (Map) args[7], + (List) args[8] ); }); static { @@ -200,6 +228,10 @@ public boolean equals(Object obj) { PARSER.declareString(constructorArg(), new ParseField("username")); PARSER.declareString(constructorArg(), new ParseField("realm")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + p.nextToken(); + return RoleDescriptor.parse(n, p, false); + }, new ParseField("role_descriptors")); } public static ApiKey fromXContent(XContentParser parser) throws IOException { @@ -224,6 +256,8 @@ public String toString() { + realm + ", metadata=" + metadata + + ", role_descriptors=" + + roleDescriptors + "]"; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java index 88515ff046460..763bbcff4e026 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java @@ -10,9 +10,11 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; import java.time.Instant; @@ -20,8 +22,11 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class ApiKeyTests extends ESTestCase { @@ -38,8 +43,9 @@ public void testXContent() throws IOException { final String username = randomAlphaOfLengthBetween(4, 10); final String realmName = randomAlphaOfLengthBetween(3, 8); final Map metadata = randomMetadata(); + final List roleDescriptors = randomBoolean() ? 
null : randomUniquelyNamedRoleDescriptors(0, 3); - final ApiKey apiKey = new ApiKey(name, id, creation, expiration, invalidated, username, realmName, metadata); + final ApiKey apiKey = new ApiKey(name, id, creation, expiration, invalidated, username, realmName, metadata, roleDescriptors); // The metadata will never be null because the constructor convert it to empty map if a null is passed in assertThat(apiKey.getMetadata(), notNullValue()); @@ -59,6 +65,18 @@ public void testXContent() throws IOException { assertThat(map.get("username"), equalTo(username)); assertThat(map.get("realm"), equalTo(realmName)); assertThat(map.get("metadata"), equalTo(Objects.requireNonNullElseGet(metadata, Map::of))); + + if (roleDescriptors == null) { + assertThat(map, not(hasKey("role_descriptors"))); + } else { + @SuppressWarnings("unchecked") + final Map rdMap = (Map) map.get("role_descriptors"); + assertThat(rdMap.size(), equalTo(roleDescriptors.size())); + for (var roleDescriptor : roleDescriptors) { + assertThat(rdMap, hasKey(roleDescriptor.getName())); + assertThat(XContentTestUtils.convertToMap(roleDescriptor), equalTo(rdMap.get(roleDescriptor.getName()))); + } + } } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java index 1486a1c7edcd7..cbc707f335e50 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java @@ -9,19 +9,26 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import java.io.IOException; import java.time.Instant; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; import static org.hamcrest.Matchers.equalTo; public class GetApiKeyResponseTests extends ESTestCase { @@ -37,12 +44,29 @@ public void testSerialization() throws IOException { false, randomAlphaOfLength(4), randomAlphaOfLength(5), - randomBoolean() ? null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)) + randomBoolean() ? null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)), + randomBoolean() ? 
null : randomUniquelyNamedRoleDescriptors(0, 3) ); GetApiKeyResponse response = new GetApiKeyResponse(Collections.singletonList(apiKeyInfo)); + + final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageApplicationPrivileges.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageApplicationPrivileges::createFrom + ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, + ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom + ) + ) + ); + try (BytesStreamOutput output = new BytesStreamOutput()) { response.writeTo(output); - try (StreamInput input = output.bytes().streamInput()) { + try (StreamInput input = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { GetApiKeyResponse serialized = new GetApiKeyResponse(input); assertThat(serialized.getApiKeyInfos(), equalTo(response.getApiKeyInfos())); } @@ -50,6 +74,16 @@ public void testSerialization() throws IOException { } public void testToXContent() throws IOException { + final List roleDescriptors = List.of( + new RoleDescriptor( + "rd_42", + new String[] { "monitor" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("index").privileges("read").build() }, + new String[] { "foo" } + ) + ); + ApiKey apiKeyInfo1 = createApiKeyInfo( "name1", "id-1", @@ -58,6 +92,7 @@ public void testToXContent() throws IOException { false, "user-a", "realm-x", + null, null ); ApiKey apiKeyInfo2 = createApiKeyInfo( @@ -68,7 +103,8 @@ public void testToXContent() throws IOException { true, "user-b", "realm-y", - Map.of() + Map.of(), + List.of() ); ApiKey apiKeyInfo3 = createApiKeyInfo( null, @@ -78,7 +114,8 @@ public void testToXContent() throws IOException { true, "user-c", "realm-z", - Map.of("foo", "bar") + Map.of("foo", "bar"), + roleDescriptors ); GetApiKeyResponse response = new GetApiKeyResponse(Arrays.asList(apiKeyInfo1, apiKeyInfo2, apiKeyInfo3)); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -104,7 +141,8 @@ public void testToXContent() throws IOException { "invalidated": true, "username": "user-b", "realm": "realm-y", - "metadata": {} + "metadata": {}, + "role_descriptors": {} }, { "id": "id-3", @@ -115,6 +153,32 @@ public void testToXContent() throws IOException { "realm": "realm-z", "metadata": { "foo": "bar" + }, + "role_descriptors": { + "rd_42": { + "cluster": [ + "monitor" + ], + "indices": [ + { + "names": [ + "index" + ], + "privileges": [ + "read" + ], + "allow_restricted_indices": false + } + ], + "applications": [], + "run_as": [ + "foo" + ], + "metadata": {}, + "transient_metadata": { + "enabled": true + } + } } } ] @@ -129,8 +193,9 @@ private ApiKey createApiKeyInfo( boolean invalidated, String username, String realm, - Map metadata + Map metadata, + List roleDescriptors ) { - return new ApiKey(name, id, creation, expiration, invalidated, username, realm, metadata); + return new ApiKey(name, id, creation, expiration, invalidated, username, realm, metadata, roleDescriptors); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java index b35c0011c96e4..2827dbc3de4e3 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java @@ -7,8 +7,12 @@ package org.elasticsearch.xpack.core.security.action.apikey; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import java.io.IOException; import java.time.Instant; @@ -18,6 +22,8 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; + public class QueryApiKeyResponseTests extends AbstractWireSerializingTestCase { @Override @@ -58,6 +64,24 @@ protected QueryApiKeyResponse mutateInstance(QueryApiKeyResponse instance) throw } } + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageApplicationPrivileges.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageApplicationPrivileges::createFrom + ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, + ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom + ) + ) + ); + } + private QueryApiKeyResponse.Item randomItem() { return new QueryApiKeyResponse.Item(randomApiKeyInfo(), randomSortValues()); } @@ -70,7 +94,8 @@ private ApiKey randomApiKeyInfo() { final Instant creation = Instant.ofEpochMilli(randomMillisUpToYear9999()); final Instant expiration = randomBoolean() ? Instant.ofEpochMilli(randomMillisUpToYear9999()) : null; final Map metadata = ApiKeyTests.randomMetadata(); - return new ApiKey(name, id, creation, expiration, false, username, realm_name, metadata); + final List roleDescriptors = randomFrom(randomUniquelyNamedRoleDescriptors(0, 3), null); + return new ApiKey(name, id, creation, expiration, false, username, realm_name, metadata, roleDescriptors); } private Object[] randomSortValues() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java similarity index 98% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java index 1135afbe1020d..aa241c37736cf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.security.authz; +package org.elasticsearch.xpack.core.security.authz; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; @@ -24,7 +24,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; @@ -626,6 +625,13 @@ public void testIsEmpty() { } } + public static List randomUniquelyNamedRoleDescriptors(int minSize, int maxSize) { + return randomValueOtherThanMany( + roleDescriptors -> roleDescriptors.stream().map(RoleDescriptor::getName).distinct().count() != roleDescriptors.size(), + () -> randomList(minSize, maxSize, () -> randomRoleDescriptor(false)) + ); + } + public static RoleDescriptor randomRoleDescriptor() { return randomRoleDescriptor(true); } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index a23bfdd60f87e..d047e1b4ff6d6 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import org.junit.After; import org.junit.Before; @@ -32,6 +33,7 @@ import java.util.Set; import static org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField.RUN_AS_USER_HEADER; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -79,6 +81,125 @@ public void cleanUp() throws IOException { invalidateApiKeysForUser(MANAGE_OWN_API_KEY_USER); } + @SuppressWarnings("unchecked") + public void testGetApiKeyRoleDescriptors() throws IOException { + // First key without assigned role descriptors, i.e. 
it inherits owner user's permission + // This can be achieved by either omitting the role_descriptors field in the request or + // explicitly set it to an empty object + final Request createApiKeyRequest1 = new Request("POST", "_security/api_key"); + if (randomBoolean()) { + createApiKeyRequest1.setJsonEntity(""" + { + "name": "k1" + }"""); + } else { + createApiKeyRequest1.setJsonEntity(""" + { + "name": "k1", + "role_descriptors": { } + }"""); + } + assertOK(adminClient().performRequest(createApiKeyRequest1)); + + // Second key with a single assigned role descriptor + final Request createApiKeyRequest2 = new Request("POST", "_security/api_key"); + createApiKeyRequest2.setJsonEntity(""" + { + "name": "k2", + "role_descriptors": { + "x": { + "cluster": [ + "monitor" + ] + } + } + }"""); + assertOK(adminClient().performRequest(createApiKeyRequest2)); + + // Third key with two assigned role descriptors + final Request createApiKeyRequest3 = new Request("POST", "_security/api_key"); + createApiKeyRequest3.setJsonEntity(""" + { + "name": "k3", + "role_descriptors": { + "x": { + "cluster": [ + "monitor" + ] + }, + "y": { + "indices": [ + { + "names": [ + "index" + ], + "privileges": [ + "read" + ] + } + ] + } + } + }"""); + assertOK(adminClient().performRequest(createApiKeyRequest3)); + + // Role descriptors are returned by both get and query api key calls + final List> apiKeyMaps; + if (randomBoolean()) { + final Request getApiKeyRequest = new Request("GET", "_security/api_key"); + final Response getApiKeyResponse = adminClient().performRequest(getApiKeyRequest); + assertOK(getApiKeyResponse); + apiKeyMaps = (List>) responseAsMap(getApiKeyResponse).get("api_keys"); + } else { + final Request queryApiKeyRequest = new Request("POST", "_security/_query/api_key"); + final Response queryApiKeyResponse = adminClient().performRequest(queryApiKeyRequest); + assertOK(queryApiKeyResponse); + apiKeyMaps = (List>) responseAsMap(queryApiKeyResponse).get("api_keys"); + } + assertThat(apiKeyMaps.size(), equalTo(3)); + + for (Map apiKeyMap : apiKeyMaps) { + final String name = (String) apiKeyMap.get("name"); + @SuppressWarnings("unchecked") + final var roleDescriptors = (Map) apiKeyMap.get("role_descriptors"); + switch (name) { + case "k1" -> { + assertThat(roleDescriptors, anEmptyMap()); + } + case "k2" -> { + assertThat( + roleDescriptors, + equalTo( + Map.of("x", XContentTestUtils.convertToMap(new RoleDescriptor("x", new String[] { "monitor" }, null, null))) + ) + ); + } + case "k3" -> { + assertThat( + roleDescriptors, + equalTo( + Map.of( + "x", + XContentTestUtils.convertToMap(new RoleDescriptor("x", new String[] { "monitor" }, null, null)), + "y", + XContentTestUtils.convertToMap( + new RoleDescriptor( + "y", + null, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("index").privileges("read").build() }, + null + ) + ) + ) + ) + ); + } + default -> throw new IllegalStateException("unknown api key name [" + name + "]"); + } + } + } + @SuppressWarnings({ "unchecked" }) public void testAuthenticateResponseApiKey() throws IOException { final String expectedApiKeyName = "my-api-key-name"; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 7cf5fac69fff6..b5fec28365bba 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -84,10 +84,10 @@ import org.elasticsearch.xpack.core.security.authc.RealmDomain; import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.authz.RoleDescriptorTests; import org.elasticsearch.xpack.security.transport.filter.IPFilter; import org.junit.After; import org.junit.Before; @@ -649,13 +649,14 @@ public void testActiveApiKeysWithNoExpirationNeverGetDeletedByRemover() throws E 2, responses, tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), response, Collections.singleton(responses.get(0).getId()), Collections.singletonList(responses.get(1).getId()) ); } - public void testGetApiKeysForRealm() throws InterruptedException, ExecutionException { + public void testGetApiKeysForRealm() throws InterruptedException, ExecutionException, IOException { int noOfApiKeys = randomIntBetween(3, 5); final Tuple, List>> tuple = createApiKeys(noOfApiKeys, null); List responses = tuple.v1(); @@ -686,7 +687,15 @@ public void testGetApiKeysForRealm() throws InterruptedException, ExecutionExcep PlainActionFuture listener = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), listener); GetApiKeyResponse response = listener.get(); - verifyGetResponse(noOfApiKeys, responses, tuple.v2(), response, expectedValidKeyIds, invalidatedApiKeyIds); + verifyGetResponse( + noOfApiKeys, + responses, + tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), + response, + expectedValidKeyIds, + invalidatedApiKeyIds + ); } public void testGetApiKeysForUser() throws Exception { @@ -703,13 +712,14 @@ public void testGetApiKeysForUser() throws Exception { noOfApiKeys, responses, tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), response, responses.stream().map(o -> o.getId()).collect(Collectors.toSet()), null ); } - public void testGetApiKeysForRealmAndUser() throws InterruptedException, ExecutionException { + public void testGetApiKeysForRealmAndUser() throws InterruptedException, ExecutionException, IOException { final Tuple, List>> tuple = createApiKeys(1, null); List responses = tuple.v1(); Client client = client().filterWithHeader( @@ -718,10 +728,18 @@ public void testGetApiKeysForRealmAndUser() throws InterruptedException, Executi PlainActionFuture listener = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmAndUserName("file", ES_TEST_ROOT_USER), listener); GetApiKeyResponse response = listener.get(); - verifyGetResponse(1, responses, tuple.v2(), response, Collections.singleton(responses.get(0).getId()), null); + verifyGetResponse( + 1, + responses, + tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), + response, + Collections.singleton(responses.get(0).getId()), + null + ); } - public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionException { + public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionException, 
IOException { final Tuple, List>> tuple = createApiKeys(1, null); List responses = tuple.v1(); Client client = client().filterWithHeader( @@ -730,10 +748,18 @@ public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionEx PlainActionFuture listener = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false), listener); GetApiKeyResponse response = listener.get(); - verifyGetResponse(1, responses, tuple.v2(), response, Collections.singleton(responses.get(0).getId()), null); + verifyGetResponse( + 1, + responses, + tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), + response, + Collections.singleton(responses.get(0).getId()), + null + ); } - public void testGetApiKeysForApiKeyName() throws InterruptedException, ExecutionException { + public void testGetApiKeysForApiKeyName() throws InterruptedException, ExecutionException, IOException { final Map headers = Collections.singletonMap( "Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING) @@ -757,7 +783,16 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution List responses = randomFrom(createApiKeyResponses1, createApiKeyResponses2); List> metadatas = responses == createApiKeyResponses1 ? tuple1.v2() : tuple2.v2(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName(responses.get(0).getName(), false), listener); - verifyGetResponse(1, responses, metadatas, listener.get(), Collections.singleton(responses.get(0).getId()), null); + // role descriptors are the same between randomization + verifyGetResponse( + 1, + responses, + metadatas, + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), + listener.get(), + Collections.singleton(responses.get(0).getId()), + null + ); PlainActionFuture listener2 = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName("test-key*", false), listener2); @@ -765,10 +800,15 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution noOfApiKeys, createApiKeyResponses1, tuple1.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), listener2.get(), createApiKeyResponses1.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()), null ); + expectAttributesForApiKeys( + createApiKeyResponses1.stream().map(CreateApiKeyResponse::getId).toList(), + Map.of(ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS, List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR)) + ); PlainActionFuture listener3 = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName("*", false), listener3); @@ -778,6 +818,7 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution 2 * noOfApiKeys, responses, metadatas, + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), listener3.get(), responses.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()), null @@ -785,7 +826,7 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution PlainActionFuture listener4 = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName("does-not-exist*", false), listener4); - verifyGetResponse(0, Collections.emptyList(), null, listener4.get(), Collections.emptySet(), null); + verifyGetResponse(0, Collections.emptyList(), null, List.of(), listener4.get(), Collections.emptySet(), null); PlainActionFuture listener5 = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, 
GetApiKeyRequest.usingApiKeyName("another-test-key*", false), listener5); @@ -793,6 +834,7 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution noOfApiKeys, createApiKeyResponses2, tuple2.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), listener5.get(), createApiKeyResponses2.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()), null @@ -823,6 +865,7 @@ public void testGetApiKeysOwnedByCurrentAuthenticatedUser() throws InterruptedEx noOfApiKeysForUserWithManageApiKeyRole, userWithManageApiKeyRoleApiKeys, tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), response, userWithManageApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null @@ -850,6 +893,7 @@ public void testGetApiKeysOwnedByRunAsUserWhenOwnerIsTrue() throws ExecutionExce noOfApiKeysForUserWithManageApiKeyRole, userWithManageOwnApiKeyRoleApiKeys, tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), response, userWithManageOwnApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null @@ -881,6 +925,7 @@ public void testGetApiKeysOwnedByRunAsUserWhenRunAsUserInfoIsGiven() throws Exec noOfApiKeysForUserWithManageApiKeyRole, userWithManageOwnApiKeyRoleApiKeys, tuple.v2(), + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), response, userWithManageOwnApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null @@ -956,6 +1001,7 @@ public void testGetAllApiKeys() throws InterruptedException, ExecutionException totalApiKeys, allApiKeys, metadatas, + List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), response, allApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null @@ -1098,7 +1144,15 @@ public void testApiKeyAuthorizationApiKeyMustBeAbleToRetrieveItsOwnInformationBu PlainActionFuture listener = new PlainActionFuture<>(); client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId(), randomBoolean()), listener); GetApiKeyResponse response = listener.get(); - verifyGetResponse(1, responses, tuple.v2(), response, Collections.singleton(responses.get(0).getId()), null); + verifyGetResponse( + 1, + responses, + tuple.v2(), + List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), Strings.EMPTY_ARRAY, null, null)), + response, + Collections.singleton(responses.get(0).getId()), + null + ); final PlainActionFuture failureListener = new PlainActionFuture<>(); // for any other API key id, it must deny access @@ -1476,28 +1530,11 @@ public void testUpdateApiKeysForSingleKey() throws Exception { final boolean isUpdated = nullRoleDescriptors == false || metadataChanged; assertEquals(isUpdated, response.isUpdated()); - final PlainActionFuture getListener = new PlainActionFuture<>(); - client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING)) - ).execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(apiKeyId, false), getListener); - final GetApiKeyResponse getResponse = getListener.get(); - assertEquals(1, getResponse.getApiKeyInfos().length); - // When metadata for the update request is null (i.e., absent), we don't overwrite old metadata with it - final var expectedMetadata = request.getMetadata() != null ? request.getMetadata() : createdApiKey.v2(); - assertEquals(expectedMetadata == null ? 
Map.of() : expectedMetadata, getResponse.getApiKeyInfos()[0].getMetadata()); - assertEquals(TEST_USER_NAME, getResponse.getApiKeyInfos()[0].getUsername()); - assertEquals("file", getResponse.getApiKeyInfos()[0].getRealm()); - // Test authenticate works with updated API key final var authResponse = authenticateWithApiKey(apiKeyId, createdApiKey.v1().getKey()); assertThat(authResponse.get(User.Fields.USERNAME.getPreferredName()), equalTo(TEST_USER_NAME)); // Document updated as expected - final var updatedApiKeyDoc = getApiKeyDocument(apiKeyId); - expectMetadataForApiKey(expectedMetadata, updatedApiKeyDoc); - expectRoleDescriptorsForApiKey("limited_by_role_descriptors", expectedLimitedByRoleDescriptors, updatedApiKeyDoc); - final var expectedRoleDescriptors = nullRoleDescriptors ? List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR) : newRoleDescriptors; - expectRoleDescriptorsForApiKey("role_descriptors", expectedRoleDescriptors, updatedApiKeyDoc); final Map expectedCreator = new HashMap<>(); expectedCreator.put("principal", TEST_USER_NAME); expectedCreator.put("full_name", null); @@ -1505,7 +1542,22 @@ public void testUpdateApiKeysForSingleKey() throws Exception { expectedCreator.put("metadata", Map.of()); expectedCreator.put("realm_type", "file"); expectedCreator.put("realm", "file"); - expectCreatorForApiKey(expectedCreator, updatedApiKeyDoc); + final var expectedMetadata = request.getMetadata() != null ? request.getMetadata() : createdApiKey.v2(); + final var expectedRoleDescriptors = nullRoleDescriptors ? List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR) : newRoleDescriptors; + + expectAttributesForApiKey( + apiKeyId, + Map.of( + ApiKeyAttribute.CREATOR, + expectedCreator, + ApiKeyAttribute.METADATA, + expectedMetadata == null ? Map.of() : expectedMetadata, + ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS, + expectedRoleDescriptors, + ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, + expectedLimitedByRoleDescriptors + ) + ); // Check if update resulted in API key role going from `monitor` to `all` cluster privilege and assert that action that requires // `all` is authorized or denied accordingly @@ -1556,10 +1608,17 @@ public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, In ) ); for (String apiKeyId : apiKeyIds) { - final Map doc = getApiKeyDocument(apiKeyId); - expectRoleDescriptorsForApiKey("role_descriptors", newRoleDescriptors, doc); - expectRoleDescriptorsForApiKey("limited_by_role_descriptors", expectedLimitedByRoleDescriptors, doc); - expectMetadataForApiKey(newMetadata, doc); + expectAttributesForApiKey( + apiKeyId, + Map.of( + ApiKeyAttribute.METADATA, + newMetadata, + ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS, + newRoleDescriptors, + ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, + expectedLimitedByRoleDescriptors + ) + ); } // Check that bulk update works when there are no actual updates @@ -1581,10 +1640,17 @@ public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, In assertThat(response.getNoops(), containsInAnyOrder(apiKeyIds.toArray())); assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(notFoundIds.toArray())); for (String apiKeyId : apiKeyIds) { - final Map doc = getApiKeyDocument(apiKeyId); - expectRoleDescriptorsForApiKey("role_descriptors", newRoleDescriptors, doc); - expectRoleDescriptorsForApiKey("limited_by_role_descriptors", expectedLimitedByRoleDescriptors, doc); - expectMetadataForApiKey(newMetadata, doc); + expectAttributesForApiKey( + apiKeyId, + Map.of( + ApiKeyAttribute.METADATA, + newMetadata, + 
ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS, + newRoleDescriptors, + ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, + expectedLimitedByRoleDescriptors + ) + ); } // Check that bulk update works when some or all updates result in errors @@ -1670,11 +1736,11 @@ public void testBulkUpdateApiKeysWithDifferentLimitedByRoleDescriptorsForSameUse "all" ); final List firstGenerationApiKeyIds = firstGenerationApiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList(); - expectRoleDescriptorsForApiKeys( - "limited_by_role_descriptors", - Set.of(firstGenerationRoleDescriptor), - firstGenerationApiKeyIds.stream().map(this::getApiKeyDocument).toList() + expectAttributesForApiKeys( + firstGenerationApiKeyIds, + Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(firstGenerationRoleDescriptor)) ); + // Update user's permissions and create new API keys for the user. The new API keys will have different limited-by role descriptors final List secondGenerationClusterPrivileges = randomValueOtherThan(firstGenerationClusterPrivileges, () -> { final List privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); @@ -1693,10 +1759,9 @@ public void testBulkUpdateApiKeysWithDifferentLimitedByRoleDescriptorsForSameUse "all" ); final List secondGenerationApiKeyIds = secondGenerationApiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList(); - expectRoleDescriptorsForApiKeys( - "limited_by_role_descriptors", - Set.of(secondGenerationRoleDescriptor), - secondGenerationApiKeyIds.stream().map(this::getApiKeyDocument).toList() + expectAttributesForApiKeys( + secondGenerationApiKeyIds, + Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(secondGenerationRoleDescriptor)) ); // Update user role then bulk update all API keys. This should result in new limited-by role descriptors for all API keys final List allIds = Stream.concat(firstGenerationApiKeyIds.stream(), secondGenerationApiKeyIds.stream()).toList(); @@ -1722,11 +1787,7 @@ public void testBulkUpdateApiKeysWithDifferentLimitedByRoleDescriptorsForSameUse assertThat(response.getErrorDetails(), anEmptyMap()); assertThat(response.getNoops(), empty()); assertThat(response.getUpdated(), containsInAnyOrder(allIds.toArray())); - expectRoleDescriptorsForApiKeys( - "limited_by_role_descriptors", - Set.of(finalRoleDescriptor), - allIds.stream().map(this::getApiKeyDocument).toList() - ); + expectAttributesForApiKeys(allIds, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(finalRoleDescriptor))); } public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { @@ -1755,7 +1816,7 @@ public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { "all" ).v1().get(0); final String apiKeyId = createdApiKey.getId(); - expectRoleDescriptorsForApiKey("limited_by_role_descriptors", Set.of(roleDescriptorBeforeUpdate), getApiKeyDocument(apiKeyId)); + expectAttributesForApiKey(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorBeforeUpdate))); final List newClusterPrivileges = randomValueOtherThan(clusterPrivileges, () -> { final List privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); @@ -1777,7 +1838,7 @@ public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { assertNotNull(response); assertTrue(response.isUpdated()); - expectRoleDescriptorsForApiKey("limited_by_role_descriptors", Set.of(roleDescriptorAfterUpdate), getApiKeyDocument(apiKeyId)); + expectAttributesForApiKey(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, 
Set.of(roleDescriptorAfterUpdate))); // Update user role name only final RoleDescriptor roleDescriptorWithNewName = putRoleWithClusterPrivileges( @@ -1796,8 +1857,6 @@ public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { assertNotNull(response); assertTrue(response.isUpdated()); - final Map updatedApiKeyDoc = getApiKeyDocument(apiKeyId); - expectRoleDescriptorsForApiKey("limited_by_role_descriptors", Set.of(roleDescriptorWithNewName), updatedApiKeyDoc); final Map expectedCreator = new HashMap<>(); expectedCreator.put("principal", updatedUser.principal()); expectedCreator.put("full_name", updatedUser.fullName()); @@ -1805,7 +1864,10 @@ public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { expectedCreator.put("metadata", updatedUser.metadata()); expectedCreator.put("realm_type", "native"); expectedCreator.put("realm", "index"); - expectCreatorForApiKey(expectedCreator, updatedApiKeyDoc); + expectAttributesForApiKey( + apiKeyId, + Map.of(ApiKeyAttribute.CREATOR, expectedCreator, ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorWithNewName)) + ); } public void testUpdateApiKeysNotFoundScenarios() throws Exception { @@ -2104,6 +2166,7 @@ public void testUpdateApiKeysAutoUpdatesLegacySuperuserRoleDescriptor() throws E legacySuperuserRoleDescriptor ) ); + // raw document has the legacy superuser role descriptor expectRoleDescriptorsForApiKey("limited_by_role_descriptors", legacySuperuserRoleDescriptor, getApiKeyDocument(apiKeyId)); final Set currentSuperuserRoleDescriptors = Set.of(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); @@ -2117,7 +2180,7 @@ public void testUpdateApiKeysAutoUpdatesLegacySuperuserRoleDescriptor() throws E currentSuperuserRoleDescriptors ) ); - expectRoleDescriptorsForApiKey("limited_by_role_descriptors", currentSuperuserRoleDescriptors, getApiKeyDocument(apiKeyId)); + expectAttributesForApiKey(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, currentSuperuserRoleDescriptors)); // Second update is noop because role descriptors were auto-updated by the previous request assertSingleNoop( apiKeyId, @@ -2226,6 +2289,56 @@ private void doTestUpdateApiKeysNotFound(final UpdateApiKeyRequest request) { assertThat(ex.getMessage(), containsString("no API key owned by requesting user found for ID [" + request.getId() + "]")); } + private enum ApiKeyAttribute { + CREATOR, + METADATA, + ASSIGNED_ROLE_DESCRIPTORS, + LIMITED_BY_ROLE_DESCRIPTORS + } + + // Check attributes with both the raw document and the get api key response whenever possible + @SuppressWarnings("unchecked") + private void expectAttributesForApiKey(String apiKeyId, Map attributes) throws IOException { + final Map apiKeyDocMap = getApiKeyDocument(apiKeyId); + final PlainActionFuture future = new PlainActionFuture<>(); + client().execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(apiKeyId, false), future); + final GetApiKeyResponse getApiKeyResponse = future.actionGet(); + assertThat(getApiKeyResponse.getApiKeyInfos(), arrayWithSize(1)); + final ApiKey apiKeyInfo = getApiKeyResponse.getApiKeyInfos()[0]; + + for (Map.Entry entry : attributes.entrySet()) { + switch (entry.getKey()) { + case CREATOR -> { + final var creatorMap = (Map) entry.getValue(); + expectCreatorForApiKey(creatorMap, apiKeyDocMap); + assertThat(creatorMap.get("principal"), equalTo(apiKeyInfo.getUsername())); + assertThat(creatorMap.get("realm"), equalTo(apiKeyInfo.getRealm())); + } + case METADATA -> { + final var metadata = (Map) entry.getValue(); + 
expectMetadataForApiKey(metadata, apiKeyDocMap); + assertThat(metadata, equalTo(apiKeyInfo.getMetadata())); + } + case ASSIGNED_ROLE_DESCRIPTORS -> { + final var expectedRoleDescriptors = (Collection) entry.getValue(); + expectRoleDescriptorsForApiKey("role_descriptors", expectedRoleDescriptors, apiKeyDocMap); + assertThat(expectedRoleDescriptors, containsInAnyOrder(apiKeyInfo.getRoleDescriptors().toArray(RoleDescriptor[]::new))); + } + case LIMITED_BY_ROLE_DESCRIPTORS -> { + final var expectedRoleDescriptors = (Collection) entry.getValue(); + expectRoleDescriptorsForApiKey("limited_by_role_descriptors", expectedRoleDescriptors, apiKeyDocMap); + } + default -> throw new IllegalStateException("unexpected attribute name"); + } + } + } + + private void expectAttributesForApiKeys(List apiKeyIds, Map attributes) throws IOException { + for (String apiKeyId : apiKeyIds) { + expectAttributesForApiKey(apiKeyId, attributes); + } + } + private void expectMetadataForApiKey(final Map expectedMetadata, final Map actualRawApiKeyDoc) { assertNotNull(actualRawApiKeyDoc); @SuppressWarnings("unchecked") @@ -2263,16 +2376,6 @@ private void expectRoleDescriptorsForApiKey( } } - private void expectRoleDescriptorsForApiKeys( - final String roleDescriptorType, - final Collection expectedRoleDescriptors, - final List> actualRawApiKeyDocs - ) throws IOException { - for (Map actualDoc : actualRawApiKeyDocs) { - expectRoleDescriptorsForApiKey(roleDescriptorType, expectedRoleDescriptors, actualDoc); - } - } - private Map getApiKeyDocument(String apiKeyId) { return client().execute(GetAction.INSTANCE, new GetRequest(SECURITY_MAIN_ALIAS, apiKeyId)).actionGet().getSource(); } @@ -2333,11 +2436,21 @@ private void verifyGetResponse( int expectedNumberOfApiKeys, List responses, List> metadatas, + List expectedRoleDescriptors, GetApiKeyResponse response, Set validApiKeyIds, List invalidatedApiKeyIds ) { - verifyGetResponse(ES_TEST_ROOT_USER, expectedNumberOfApiKeys, responses, metadatas, response, validApiKeyIds, invalidatedApiKeyIds); + verifyGetResponse( + ES_TEST_ROOT_USER, + expectedNumberOfApiKeys, + responses, + metadatas, + expectedRoleDescriptors, + response, + validApiKeyIds, + invalidatedApiKeyIds + ); } private void verifyGetResponse( @@ -2345,6 +2458,7 @@ private void verifyGetResponse( int expectedNumberOfApiKeys, List responses, List> metadatas, + List expectedRoleDescriptors, GetApiKeyResponse response, Set validApiKeyIds, List invalidatedApiKeyIds @@ -2354,6 +2468,7 @@ private void verifyGetResponse( expectedNumberOfApiKeys, responses, metadatas, + expectedRoleDescriptors, response, validApiKeyIds, invalidatedApiKeyIds @@ -2365,6 +2480,7 @@ private void verifyGetResponse( int expectedNumberOfApiKeys, List responses, List> metadatas, + List expectedRoleDescriptors, GetApiKeyResponse response, Set validApiKeyIds, List invalidatedApiKeyIds @@ -2413,6 +2529,13 @@ private void verifyGetResponse( assertThat(apiKey.getMetadata(), equalTo(metadata == null ? 
Map.of() : metadata)); } } + Arrays.stream(response.getApiKeyInfos()) + .forEach( + apiKeyInfo -> assertThat( + apiKeyInfo.getRoleDescriptors(), + containsInAnyOrder(expectedRoleDescriptors.toArray(RoleDescriptor[]::new)) + ) + ); } private Tuple> createApiKey(String user, TimeValue expiration) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index a9e1a116292d5..24280241a6e68 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -840,7 +840,7 @@ void validateApiKeyCredentials( if (apiKeyAuthCache != null) { apiKeyAuthCache.invalidate(docId); } - listener.onResponse(AuthenticationResult.unsuccessful("api key has been invalidated", null)); + listener.onResponse(AuthenticationResult.unsuccessful("api key [" + credentials.getId() + "] has been invalidated", null)); } else { if (apiKeyDoc.hash == null) { throw new IllegalStateException("api key hash is missing"); @@ -1212,7 +1212,7 @@ public void invalidateApiKeys( apiKeyIds, true, false, - ApiKeyService::convertSearchHitToApiKeyInfo, + this::convertSearchHitToApiKeyInfo, ActionListener.wrap(apiKeys -> { if (apiKeys.isEmpty()) { logger.debug( @@ -1593,7 +1593,7 @@ public void getApiKeys( apiKeyIds, false, false, - ApiKeyService::convertSearchHitToApiKeyInfo, + this::convertSearchHitToApiKeyInfo, ActionListener.wrap(apiKeyInfos -> { if (apiKeyInfos.isEmpty()) { logger.debug( @@ -1636,7 +1636,7 @@ public void queryApiKeys(SearchRequest searchRequest, ActionListener apiKeyItem = Arrays.stream(searchResponse.getHits().getHits()) - .map(ApiKeyService::convertSearchHitToQueryItem) + .map(this::convertSearchHitToQueryItem) .toList(); listener.onResponse(new QueryApiKeyResponse(total, apiKeyItem)); }, listener::onFailure) @@ -1645,33 +1645,33 @@ public void queryApiKeys(SearchRequest searchRequest, ActionListener source = hit.getSourceAsMap(); - String name = (String) source.get("name"); - String id = hit.getId(); - Long creation = (Long) source.get("creation_time"); - Long expiration = (Long) source.get("expiration_time"); - Boolean invalidated = (Boolean) source.get("api_key_invalidated"); - @SuppressWarnings("unchecked") - String username = (String) ((Map) source.get("creator")).get("principal"); - @SuppressWarnings("unchecked") - String realm = (String) ((Map) source.get("creator")).get("realm"); - @SuppressWarnings("unchecked") - Map metadata = (Map) source.get("metadata_flattened"); + private ApiKey convertSearchHitToApiKeyInfo(SearchHit hit) { + final ApiKeyDoc apiKeyDoc = convertSearchHitToVersionedApiKeyDoc(hit).doc; + final String apiKeyId = hit.getId(); + final Map metadata = apiKeyDoc.metadataFlattened != null + ? XContentHelper.convertToMap(apiKeyDoc.metadataFlattened, false, XContentType.JSON).v2() + : Map.of(); + + final List roleDescriptors = parseRoleDescriptorsBytes( + apiKeyId, + apiKeyDoc.roleDescriptorsBytes, + RoleReference.ApiKeyRoleType.ASSIGNED + ); return new ApiKey( - name, - id, - Instant.ofEpochMilli(creation), - (expiration != null) ? Instant.ofEpochMilli(expiration) : null, - invalidated, - username, - realm, - metadata + apiKeyDoc.name, + apiKeyId, + Instant.ofEpochMilli(apiKeyDoc.creationTime), + apiKeyDoc.expirationTime != -1 ? 
Instant.ofEpochMilli(apiKeyDoc.expirationTime) : null, + apiKeyDoc.invalidated, + (String) apiKeyDoc.creator.get("principal"), + (String) apiKeyDoc.creator.get("realm"), + metadata, + roleDescriptors ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 9a1014a96c1aa..56b9d81057aed 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -82,13 +82,13 @@ import org.elasticsearch.xpack.core.security.authc.support.AuthenticationContextSerializer; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.ApiKeyService.ApiKeyCredentials; import org.elasticsearch.xpack.security.authc.ApiKeyService.ApiKeyDoc; import org.elasticsearch.xpack.security.authc.ApiKeyService.CachedApiKeyHashResult; -import org.elasticsearch.xpack.security.authz.RoleDescriptorTests; import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; import org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.FeatureNotEnabledException; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index 417757cdaf71d..bc070cbe4e354 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -40,6 +41,7 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; @@ -87,9 +89,10 @@ public void sendResponse(RestResponse restResponse) { final Instant expiration = randomFrom(Arrays.asList(null, Instant.now().plus(10, ChronoUnit.DAYS))); @SuppressWarnings("unchecked") final Map metadata = ApiKeyTests.randomMetadata(); + final List roleDescriptors = randomUniquelyNamedRoleDescriptors(0, 3); final GetApiKeyResponse getApiKeyResponseExpected = new GetApiKeyResponse( Collections.singletonList( - new ApiKey("api-key-name-1", "api-key-id-1", creation, expiration, false, "user-x", "realm-1", metadata) + new ApiKey("api-key-name-1", "api-key-id-1", 
creation, expiration, false, "user-x", "realm-1", metadata, roleDescriptors) ) ); @@ -140,7 +143,17 @@ public void doE assertThat( actual.getApiKeyInfos(), arrayContaining( - new ApiKey("api-key-name-1", "api-key-id-1", creation, expiration, false, "user-x", "realm-1", metadata) + new ApiKey( + "api-key-name-1", + "api-key-id-1", + creation, + expiration, + false, + "user-x", + "realm-1", + metadata, + roleDescriptors + ) ) ); } @@ -177,7 +190,8 @@ public void sendResponse(RestResponse restResponse) { false, "user-x", "realm-1", - ApiKeyTests.randomMetadata() + ApiKeyTests.randomMetadata(), + randomUniquelyNamedRoleDescriptors(0, 3) ); final ApiKey apiKey2 = new ApiKey( "api-key-name-2", @@ -187,7 +201,8 @@ public void sendResponse(RestResponse restResponse) { false, "user-y", "realm-1", - ApiKeyTests.randomMetadata() + ApiKeyTests.randomMetadata(), + randomUniquelyNamedRoleDescriptors(0, 3) ); final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsTrue = new GetApiKeyResponse(Collections.singletonList(apiKey1)); final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsFalse = new GetApiKeyResponse(List.of(apiKey1, apiKey2)); From 254e6bcabd18c26338e075287bd4d74b8b0e2257 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 9 Aug 2022 09:17:34 +0200 Subject: [PATCH 141/265] Remove needless optimization ShardRouting.asList (#89179) This iterator is never used in hot code (there seems to only be a single production code usage for it), no need to cache a list here just for it. --- .../org/elasticsearch/cluster/routing/ShardRouting.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index eabf4dde8581d..8690ab1129568 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -21,7 +21,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -44,7 +43,6 @@ public final class ShardRouting implements Writeable, ToXContentObject { private final RecoverySource recoverySource; private final UnassignedInfo unassignedInfo; private final AllocationId allocationId; - private final transient List asList; private final long expectedShardSize; @Nullable private final ShardRouting targetRelocatingShard; @@ -74,7 +72,6 @@ public final class ShardRouting implements Writeable, ToXContentObject { this.allocationId = allocationId; this.expectedShardSize = expectedShardSize; this.targetRelocatingShard = initializeTargetRelocatingShard(); - this.asList = Collections.singletonList(this); assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; @@ -271,7 +268,7 @@ public ShardId shardId() { * A shard iterator with just this shard in it. 
*/ public ShardIterator shardsIt() { - return new PlainShardIterator(shardId, asList); + return new PlainShardIterator(shardId, List.of(this)); } public ShardRouting(ShardId shardId, StreamInput in) throws IOException { @@ -294,7 +291,6 @@ public ShardRouting(ShardId shardId, StreamInput in) throws IOException { shardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; } expectedShardSize = shardSize; - asList = Collections.singletonList(this); targetRelocatingShard = initializeTargetRelocatingShard(); } From c6c05bb62570e62ced43d0590dad65ef4aa9c862 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 9 Aug 2022 11:08:13 +0200 Subject: [PATCH 142/265] Deduplicate ShardRouting instances when building ClusterInfo (#89190) The equality checks on these in `DiskThresholdDecider` become very expensive during reroute in a large cluster. Deduplicating these when building the `ClusterInfo` saves more than 2% CPU time during many-shards benchmark bootstrapping because the lookup of the shard data path by shard-routing mostly hit instance equality. Also, this saves a little memory. This PR also moves the callback for building `ClusterInfo` from the stats response to the management pool as it is now more expensive (though the overall CPU use from it is trivial relative to the cost savings during reroute) and was questionable to run on a transport thread in a large cluster to begin with. Co-authored-by: David Turner --- .../cluster/InternalClusterInfoService.java | 155 ++++++++++-------- .../cluster/routing/RoutingTable.java | 27 +++ .../cluster/routing/ShardRouting.java | 2 +- .../decider/DiskThresholdDecider.java | 8 - .../elasticsearch/cluster/DiskUsageTests.java | 10 +- ...rnalClusterInfoServiceSchedulingTests.java | 1 + 6 files changed, 126 insertions(+), 77 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index ba9d87e7d1d6c..eb6ac9130906d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -18,12 +18,13 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.service.ClusterService; @@ -93,6 +94,8 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private final Object mutex = new Object(); private final List> nextRefreshListeners = new ArrayList<>(); + + private final ClusterService clusterService; private AsyncRefresh currentRefresh; private RefreshScheduler refreshScheduler; @@ -102,6 +105,7 @@ public InternalClusterInfoService(Settings settings, ClusterService clusterServi this.indicesStatsSummary = IndicesStatsSummary.EMPTY; 
this.threadPool = threadPool; this.client = client; + this.clusterService = clusterService; this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); this.enabled = DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); @@ -191,76 +195,92 @@ private void fetchIndicesStats() { indicesStatsRequest.store(true); indicesStatsRequest.indicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_CLOSED_HIDDEN); indicesStatsRequest.timeout(fetchTimeout); - client.admin().indices().stats(indicesStatsRequest, ActionListener.runAfter(new ActionListener<>() { - @Override - public void onResponse(IndicesStatsResponse indicesStatsResponse) { - logger.trace("received indices stats response"); - - if (indicesStatsResponse.getShardFailures().length > 0) { - final Set failedNodeIds = new HashSet<>(); - for (final DefaultShardOperationFailedException shardFailure : indicesStatsResponse.getShardFailures()) { - if (shardFailure.getCause()instanceof final FailedNodeException failedNodeException) { - if (failedNodeIds.add(failedNodeException.nodeId())) { - logger.warn( - () -> format("failed to retrieve shard stats from node [%s]", failedNodeException.nodeId()), - failedNodeException.getCause() - ); + client.admin() + .indices() + .stats( + indicesStatsRequest, + new ThreadedActionListener<>( + logger, + threadPool, + ThreadPool.Names.MANAGEMENT, + ActionListener.runAfter(new ActionListener<>() { + @Override + public void onResponse(IndicesStatsResponse indicesStatsResponse) { + logger.trace("received indices stats response"); + + if (indicesStatsResponse.getShardFailures().length > 0) { + final Set failedNodeIds = new HashSet<>(); + for (final var shardFailure : indicesStatsResponse.getShardFailures()) { + if (shardFailure.getCause()instanceof final FailedNodeException failedNodeException) { + if (failedNodeIds.add(failedNodeException.nodeId())) { + logger.warn( + () -> format( + "failed to retrieve shard stats from node [%s]", + failedNodeException.nodeId() + ), + failedNodeException.getCause() + ); + } + logger.trace( + () -> format( + "failed to retrieve stats for shard [%s][%s]", + shardFailure.index(), + shardFailure.shardId() + ), + shardFailure.getCause() + ); + } else { + logger.warn( + () -> format( + "failed to retrieve stats for shard [%s][%s]", + shardFailure.index(), + shardFailure.shardId() + ), + shardFailure.getCause() + ); + } + } } - logger.trace( - () -> format( - "failed to retrieve stats for shard [%s][%s]", - shardFailure.index(), - shardFailure.shardId() - ), - shardFailure.getCause() - ); - } else { - logger.warn( - () -> format( - "failed to retrieve stats for shard [%s][%s]", - shardFailure.index(), - shardFailure.shardId() - ), - shardFailure.getCause() - ); - } - } - } - final ShardStats[] stats = indicesStatsResponse.getShards(); - final Map shardSizeByIdentifierBuilder = new HashMap<>(); - final Map shardDataSetSizeBuilder = new HashMap<>(); - final Map dataPathByShardRoutingBuilder = new HashMap<>(); - final Map reservedSpaceBuilders = new HashMap<>(); - buildShardLevelInfo( - stats, - shardSizeByIdentifierBuilder, - shardDataSetSizeBuilder, - dataPathByShardRoutingBuilder, - reservedSpaceBuilders - ); + final ShardStats[] stats = indicesStatsResponse.getShards(); + final Map shardSizeByIdentifierBuilder = new HashMap<>(); + final Map shardDataSetSizeBuilder = new HashMap<>(); + final Map dataPathByShardRoutingBuilder = new HashMap<>(); + final Map 
reservedSpaceBuilders = + new HashMap<>(); + buildShardLevelInfo( + clusterService.state().routingTable(), + stats, + shardSizeByIdentifierBuilder, + shardDataSetSizeBuilder, + dataPathByShardRoutingBuilder, + reservedSpaceBuilders + ); - final Map rsrvdSpace = new HashMap<>(); - reservedSpaceBuilders.forEach((nodeAndPath, builder) -> rsrvdSpace.put(nodeAndPath, builder.build())); + final Map rsrvdSpace = new HashMap<>(); + reservedSpaceBuilders.forEach((nodeAndPath, builder) -> rsrvdSpace.put(nodeAndPath, builder.build())); - indicesStatsSummary = new IndicesStatsSummary( - Map.copyOf(shardSizeByIdentifierBuilder), - Map.copyOf(shardDataSetSizeBuilder), - Map.copyOf(dataPathByShardRoutingBuilder), - Map.copyOf(rsrvdSpace) - ); - } + indicesStatsSummary = new IndicesStatsSummary( + Map.copyOf(shardSizeByIdentifierBuilder), + Map.copyOf(shardDataSetSizeBuilder), + Map.copyOf(dataPathByShardRoutingBuilder), + Map.copyOf(rsrvdSpace) + ); + } - @Override - public void onFailure(Exception e) { - if (e instanceof ClusterBlockException) { - logger.trace("failed to retrieve indices stats", e); - } else { - logger.warn("failed to retrieve indices stats", e); - } - indicesStatsSummary = IndicesStatsSummary.EMPTY; - } - }, this::onStatsProcessed)); + @Override + public void onFailure(Exception e) { + if (e instanceof ClusterBlockException) { + logger.trace("failed to retrieve indices stats", e); + } else { + logger.warn("failed to retrieve indices stats", e); + } + indicesStatsSummary = IndicesStatsSummary.EMPTY; + } + }, this::onStatsProcessed), + false + ) + ); } private void fetchNodeStats() { @@ -426,6 +446,7 @@ public void addListener(Consumer clusterInfoConsumer) { } static void buildShardLevelInfo( + RoutingTable routingTable, ShardStats[] stats, Map shardSizes, Map shardDataSetSizeBuilder, @@ -433,7 +454,7 @@ static void buildShardLevelInfo( Map reservedSpaceByShard ) { for (ShardStats s : stats) { - final ShardRouting shardRouting = s.getShardRouting(); + final ShardRouting shardRouting = routingTable.deduplicate(s.getShardRouting()); newShardRoutingToDataPath.put(shardRouting, s.getDataPath()); final StoreStats storeStats = s.getStats().getStore(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index d32e46cfb14fb..56eb57121e439 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -147,6 +147,33 @@ public IndexShardRoutingTable shardRoutingTable(ShardId shardId) { return shard; } + /** + * Try to deduplicate the given shard routing with an equal instance found in this routing table. This is used by the logic of the + * {@link org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider} and + * {@link org.elasticsearch.cluster.InternalClusterInfoService} to deduplicate instances created by a master node and those read from + * the network to speed up the use of {@link ShardRouting} as a map key in {@link org.elasticsearch.cluster.ClusterInfo#getDataPath}. 
+ * + * @param shardRouting shard routing to deduplicate + * @return deduplicated shard routing from this routing table if an equivalent shard routing was found or the given instance otherwise + */ + public ShardRouting deduplicate(ShardRouting shardRouting) { + final IndexRoutingTable indexShardRoutingTable = indicesRouting.get(shardRouting.index().getName()); + if (indexShardRoutingTable == null) { + return shardRouting; + } + final IndexShardRoutingTable shardRoutingTable = indexShardRoutingTable.shard(shardRouting.id()); + if (shardRoutingTable == null) { + return shardRouting; + } + for (int i = 0; i < shardRoutingTable.size(); i++) { + ShardRouting found = shardRoutingTable.shard(i); + if (shardRouting.equals(found)) { + return found; + } + } + return shardRouting; + } + @Nullable public ShardRouting getByAllocationId(ShardId shardId, String allocationId) { final IndexRoutingTable indexRoutingTable = index(shardId.getIndex()); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 8690ab1129568..d18106769a468 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -671,7 +671,7 @@ public boolean isRelocationSourceOf(ShardRouting other) { /** returns true if the current routing is identical to the other routing in all but meta fields, i.e., unassigned info */ public boolean equalsIgnoringMetadata(ShardRouting other) { return primary == other.primary - && Objects.equals(shardId, other.shardId) + && shardId.equals(other.shardId) && Objects.equals(currentNodeId, other.currentNodeId) && Objects.equals(relocatingNodeId, other.relocatingNodeId) && Objects.equals(allocationId, other.allocationId) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 076636f869485..c32e7aecdcdd1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -605,14 +605,6 @@ String getPath() { return diskUsage.getPath(); } - String getNodeId() { - return diskUsage.getNodeId(); - } - - String getNodeName() { - return diskUsage.getNodeName(); - } - long getTotalBytes() { return diskUsage.getTotalBytes(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 69cd7de2f047d..920047582fa3b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -124,7 +125,14 @@ public void testFillShardLevelInfo() { Map shardSizes = new HashMap<>(); Map shardDataSetSizes = new HashMap<>(); Map routingToPath = new HashMap<>(); - 
InternalClusterInfoService.buildShardLevelInfo(stats, shardSizes, shardDataSetSizes, routingToPath, new HashMap<>()); + InternalClusterInfoService.buildShardLevelInfo( + RoutingTable.EMPTY_ROUTING_TABLE, + stats, + shardSizes, + shardDataSetSizes, + routingToPath, + new HashMap<>() + ); assertEquals(2, shardSizes.size()); assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_0))); assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_1))); diff --git a/server/src/test/java/org/elasticsearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/elasticsearch/cluster/InternalClusterInfoServiceSchedulingTests.java index c4bbc7a12f813..d13df9ec3cec7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -105,6 +105,7 @@ protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { setFlagOnSuccess(becameMaster2) ); runUntilFlag(deterministicTaskQueue, becameMaster2); + deterministicTaskQueue.runAllRunnableTasks(); for (int i = 0; i < 3; i++) { final int initialRequestCount = client.requestCount; From 80eeca74e4332c967787aab7d6103e43ba44a88b Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 9 Aug 2022 10:54:03 +0100 Subject: [PATCH 143/265] [ML] Confirm platinum license for experimental ML aggregations (#89117) We have 5 experimental aggregations in the ML plugin that are not confirming a platinum license when they are used. Before they are made generally available we should plug this hole. The 5 aggregations are: 1. change_point 2. bucket_correlation 3. bucket_count_ks_test 4. frequent_items 5. categorize_text This PR also touches a sixth aggregation, namely inference. This was already platinum licensed but is given its own unique feature like the other five aggregations. In the future this will allow us to tell how popular the different aggregations are. (The principle of separate features per licensed aggregation is taken from the spatial plugin.) 
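In short, each of these aggregations now registers its own momentary licensed feature and has its parser wrapped in a license check, so a request that uses one of them fails fast on clusters whose license does not cover it. A condensed sketch of that gating pattern follows; the class name AggLicenseGate and the feature name "my-licensed-agg" are illustrative placeholders, not the real features registered in MachineLearning, and the sketch only mirrors the approach described above rather than the exact plugin code.

    import org.elasticsearch.license.License;
    import org.elasticsearch.license.LicenseUtils;
    import org.elasticsearch.license.LicensedFeature;
    import org.elasticsearch.license.XPackLicenseState;
    import org.elasticsearch.xcontent.ContextParser;
    import org.elasticsearch.xpack.core.ml.MachineLearningField;

    // Sketch of the per-aggregation license gate; "my-licensed-agg" stands in for the
    // real feature names (categorize-text-agg, frequent-items-agg, change-point-agg, ...).
    class AggLicenseGate {
        static final LicensedFeature.Momentary MY_LICENSED_AGG_FEATURE = LicensedFeature.momentary(
            MachineLearningField.ML_FEATURE_FAMILY,
            "my-licensed-agg",
            License.OperationMode.PLATINUM
        );

        private final XPackLicenseState licenseState;

        AggLicenseGate(XPackLicenseState licenseState) {
            this.licenseState = licenseState;
        }

        // Wraps the aggregation's real parser: parsing throws a license compliance
        // exception unless the current license (platinum or trial) allows the feature.
        <T> ContextParser<String, T> checkAggLicense(ContextParser<String, T> realParser, LicensedFeature.Momentary feature) {
            return (parser, name) -> {
                if (feature.check(licenseState) == false) {
                    throw LicenseUtils.newComplianceException(feature.getName());
                }
                return realParser.parse(parser, name);
            };
        }
    }

Registering the wrapped parser in the aggregation spec (as the diff below does for each aggregation) is what turns an unlicensed request into an immediate compliance error at parse time instead of silently running the aggregation.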
--- .../xpack/ml/MachineLearning.java | 68 +++++++- .../TransportInternalInferModelAction.java | 4 +- .../ChangePointAggregationBuilder.java | 6 - .../BucketCorrelationAggregationBuilder.java | 9 -- .../InferencePipelineAggregationBuilder.java | 4 +- .../BucketCountKSTestAggregationBuilder.java | 9 -- .../xpack/ml/MachineLearningTests.java | 146 +++++++++++------- ...CategorizeTextAggregationBuilderTests.java | 6 +- .../ChangePointAggregationBuilderTests.java | 5 +- ...ketCorrelationAggregationBuilderTests.java | 5 +- .../FrequentItemSetsTests.java | 6 +- ...ketCountKSTestAggregationBuilderTests.java | 5 +- 12 files changed, 166 insertions(+), 107 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 0467d174f8616..67d1ea312b34d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -53,6 +53,7 @@ import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.LicensedFeature; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -77,6 +78,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderService; @@ -279,6 +281,7 @@ import org.elasticsearch.xpack.ml.aggs.categorization.InternalCategorizationAggregation; import org.elasticsearch.xpack.ml.aggs.changepoint.ChangePointAggregationBuilder; import org.elasticsearch.xpack.ml.aggs.changepoint.ChangePointNamedContentProvider; +import org.elasticsearch.xpack.ml.aggs.changepoint.InternalChangePointAggregation; import org.elasticsearch.xpack.ml.aggs.correlation.BucketCorrelationAggregationBuilder; import org.elasticsearch.xpack.ml.aggs.correlation.CorrelationNamedContentProvider; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.FrequentItemSetsAggregationBuilder; @@ -286,6 +289,7 @@ import org.elasticsearch.xpack.ml.aggs.heuristic.PValueScore; import org.elasticsearch.xpack.ml.aggs.inference.InferencePipelineAggregationBuilder; import org.elasticsearch.xpack.ml.aggs.kstest.BucketCountKSTestAggregationBuilder; +import org.elasticsearch.xpack.ml.aggs.kstest.InternalKSTestAggregation; import org.elasticsearch.xpack.ml.annotations.AnnotationPersister; import org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingDeciderService; import org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingNamedWritableProvider; @@ -501,6 +505,37 @@ public class MachineLearning extends Plugin License.OperationMode.PLATINUM ); + private static final LicensedFeature.Momentary CATEGORIZE_TEXT_AGG_FEATURE = LicensedFeature.momentary( + MachineLearningField.ML_FEATURE_FAMILY, + "categorize-text-agg", + License.OperationMode.PLATINUM + ); + private static final LicensedFeature.Momentary FREQUENT_ITEM_SETS_AGG_FEATURE = LicensedFeature.momentary( + MachineLearningField.ML_FEATURE_FAMILY, + "frequent-items-agg", + License.OperationMode.PLATINUM + ); + public static final LicensedFeature.Momentary 
INFERENCE_AGG_FEATURE = LicensedFeature.momentary( + MachineLearningField.ML_FEATURE_FAMILY, + "inference-agg", + License.OperationMode.PLATINUM + ); + private static final LicensedFeature.Momentary CHANGE_POINT_AGG_FEATURE = LicensedFeature.momentary( + MachineLearningField.ML_FEATURE_FAMILY, + "change-point-agg", + License.OperationMode.PLATINUM + ); + private static final LicensedFeature.Momentary BUCKET_CORRELATION_AGG_FEATURE = LicensedFeature.momentary( + MachineLearningField.ML_FEATURE_FAMILY, + "bucket-correlation-agg", + License.OperationMode.PLATINUM + ); + private static final LicensedFeature.Momentary BUCKET_COUNT_KS_TEST_AGG_FEATURE = LicensedFeature.momentary( + MachineLearningField.ML_FEATURE_FAMILY, + "bucket-count-ks-test-agg", + License.OperationMode.PLATINUM + ); + @Override public Map getProcessors(Processor.Parameters parameters) { if (this.enabled == false) { @@ -1454,11 +1489,23 @@ public Map> getTokenizers() { @Override public List getPipelineAggregations() { - return Arrays.asList( + return List.of( InferencePipelineAggregationBuilder.buildSpec(modelLoadingService, getLicenseState(), settings), - BucketCorrelationAggregationBuilder.buildSpec(), - BucketCountKSTestAggregationBuilder.buildSpec(), - ChangePointAggregationBuilder.buildSpec() + new SearchPlugin.PipelineAggregationSpec( + BucketCorrelationAggregationBuilder.NAME, + BucketCorrelationAggregationBuilder::new, + checkAggLicense(BucketCorrelationAggregationBuilder.PARSER, BUCKET_CORRELATION_AGG_FEATURE) + ), + new SearchPlugin.PipelineAggregationSpec( + BucketCountKSTestAggregationBuilder.NAME, + BucketCountKSTestAggregationBuilder::new, + checkAggLicense(BucketCountKSTestAggregationBuilder.PARSER, BUCKET_COUNT_KS_TEST_AGG_FEATURE) + ).addResultReader(InternalKSTestAggregation::new), + new SearchPlugin.PipelineAggregationSpec( + ChangePointAggregationBuilder.NAME, + ChangePointAggregationBuilder::new, + checkAggLicense(ChangePointAggregationBuilder.PARSER, CHANGE_POINT_AGG_FEATURE) + ).addResultReader(InternalChangePointAggregation::new) ); } @@ -1467,19 +1514,28 @@ public List> getSignificanceHeuristics() { return List.of(new SignificanceHeuristicSpec<>(PValueScore.NAME, PValueScore::new, PValueScore.PARSER)); } + private ContextParser checkAggLicense(ContextParser realParser, LicensedFeature.Momentary feature) { + return (parser, name) -> { + if (feature.check(getLicenseState()) == false) { + throw LicenseUtils.newComplianceException(feature.getName()); + } + return realParser.parse(parser, name); + }; + } + @Override public List getAggregations() { return List.of( new AggregationSpec( CategorizeTextAggregationBuilder.NAME, CategorizeTextAggregationBuilder::new, - CategorizeTextAggregationBuilder.PARSER + checkAggLicense(CategorizeTextAggregationBuilder.PARSER, CATEGORIZE_TEXT_AGG_FEATURE) ).addResultReader(InternalCategorizationAggregation::new) .setAggregatorRegistrar(s -> s.registerUsage(CategorizeTextAggregationBuilder.NAME)), new AggregationSpec( FrequentItemSetsAggregationBuilder.NAME, FrequentItemSetsAggregationBuilder::new, - FrequentItemSetsAggregationBuilder.PARSER + checkAggLicense(FrequentItemSetsAggregationBuilder.PARSER, FREQUENT_ITEM_SETS_AGG_FEATURE) ).addResultReader(FrequentItemSetsAggregatorFactory.getResultReader()) .setAggregatorRegistrar(FrequentItemSetsAggregationBuilder::registerAggregators) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index c87c796a2f0e0..92900eb05e5b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction.Request; @@ -31,6 +30,7 @@ import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; +import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; @@ -98,7 +98,7 @@ protected void doExecute(Task task, Request request, ActionListener li Response.Builder responseBuilder = Response.builder(); TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); - if (MachineLearningField.ML_API_FEATURE.check(licenseState)) { + if (MachineLearning.INFERENCE_AGG_FEATURE.check(licenseState)) { responseBuilder.setLicensed(true); doInfer(task, request, responseBuilder, parentTaskId, listener); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java index 64a76dec23621..1c0112e84e35f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java @@ -10,7 +10,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.aggregations.pipeline.BucketMetricsPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -53,11 +52,6 @@ public ChangePointAggregationBuilder(StreamInput in) throws IOException { super(in, NAME.getPreferredName()); } - public static SearchPlugin.PipelineAggregationSpec buildSpec() { - return new SearchPlugin.PipelineAggregationSpec(NAME, ChangePointAggregationBuilder::new, ChangePointAggregationBuilder.PARSER) - .addResultReader(InternalChangePointAggregation::new); - } - @Override public String getWriteableName() { return NAME.getPreferredName(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java index 10eb7311ba321..4c09a2bc2e745 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java @@ -10,7 +10,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.aggregations.pipeline.BucketMetricsPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -59,14 +58,6 @@ public class BucketCorrelationAggregationBuilder extends BucketMetricsPipelineAg }, GAP_POLICY, ObjectParser.ValueType.STRING); } - public static SearchPlugin.PipelineAggregationSpec buildSpec() { - return new SearchPlugin.PipelineAggregationSpec( - NAME, - BucketCorrelationAggregationBuilder::new, - BucketCorrelationAggregationBuilder.PARSER - ); - } - private final CorrelationFunction correlationFunction; public BucketCorrelationAggregationBuilder(String name, String bucketsPath, CorrelationFunction correlationFunction) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java index 44a308519dd3b..6b485fd46575b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java @@ -30,7 +30,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfigUpdate; @@ -42,6 +41,7 @@ import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; @@ -268,7 +268,7 @@ public InferencePipelineAggregationBuilder rewrite(QueryRewriteContext context) loadedModel.set(localModel); boolean isLicensed = localModel.getLicenseLevel() == License.OperationMode.BASIC - || MachineLearningField.ML_API_FEATURE.check(licenseState); + || MachineLearning.INFERENCE_AGG_FEATURE.check(licenseState); if (isLicensed) { delegate.onResponse(null); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java index eb6cc48a31635..17e8e769082e9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; -import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.aggregations.pipeline.BucketMetricsPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -124,14 +123,6 @@ public BucketCountKSTestAggregationBuilder(StreamInput in) throws IOException { this.samplingMethod = SamplingMethod.fromStream(in); } - public static SearchPlugin.PipelineAggregationSpec buildSpec() { - return new SearchPlugin.PipelineAggregationSpec( - NAME, - BucketCountKSTestAggregationBuilder::new, - BucketCountKSTestAggregationBuilder.PARSER - ).addResultReader(InternalKSTestAggregation::new); - } - @Override public String getWriteableName() { return NAME.getPreferredName(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java index 53cc5ddeadadd..8927573ba5d91 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; +import java.io.IOException; import java.util.Collections; import java.util.Map; @@ -40,7 +41,7 @@ public class MachineLearningTests extends ESTestCase { @SuppressWarnings("unchecked") - public void testPrePostSystemIndexUpgrade_givenNotInUpgradeMode() { + public void testPrePostSystemIndexUpgrade_givenNotInUpgradeMode() throws IOException { ThreadPool threadpool = new TestThreadPool("test"); ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); @@ -52,27 +53,40 @@ public void testPrePostSystemIndexUpgrade_givenNotInUpgradeMode() { return null; }).when(client).execute(same(SetUpgradeModeAction.INSTANCE), any(SetUpgradeModeAction.Request.class), any(ActionListener.class)); - MachineLearning machineLearning = createMachineLearning(Settings.EMPTY); - - SetOnce> response = new SetOnce<>(); - machineLearning.prepareForIndicesMigration(clusterService, client, ActionListener.wrap(response::set, e -> fail(e.getMessage()))); - - assertThat(response.get(), equalTo(Collections.singletonMap("already_in_upgrade_mode", false))); - verify(client).execute(same(SetUpgradeModeAction.INSTANCE), eq(new SetUpgradeModeAction.Request(true)), any(ActionListener.class)); - - machineLearning.indicesMigrationComplete( - response.get(), - clusterService, - client, - ActionListener.wrap(ESTestCase::assertTrue, e -> fail(e.getMessage())) - ); - - verify(client).execute(same(SetUpgradeModeAction.INSTANCE), eq(new SetUpgradeModeAction.Request(false)), any(ActionListener.class)); - - threadpool.shutdown(); + try (MachineLearning machineLearning = createTrialLicensedMachineLearning(Settings.EMPTY)) { + + SetOnce> response = new SetOnce<>(); + machineLearning.prepareForIndicesMigration( + clusterService, + client, + ActionListener.wrap(response::set, e -> fail(e.getMessage())) + ); + + assertThat(response.get(), 
equalTo(Collections.singletonMap("already_in_upgrade_mode", false))); + verify(client).execute( + same(SetUpgradeModeAction.INSTANCE), + eq(new SetUpgradeModeAction.Request(true)), + any(ActionListener.class) + ); + + machineLearning.indicesMigrationComplete( + response.get(), + clusterService, + client, + ActionListener.wrap(ESTestCase::assertTrue, e -> fail(e.getMessage())) + ); + + verify(client).execute( + same(SetUpgradeModeAction.INSTANCE), + eq(new SetUpgradeModeAction.Request(false)), + any(ActionListener.class) + ); + } finally { + threadpool.shutdown(); + } } - public void testPrePostSystemIndexUpgrade_givenAlreadyInUpgradeMode() { + public void testPrePostSystemIndexUpgrade_givenAlreadyInUpgradeMode() throws IOException { ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn( ClusterState.builder(ClusterName.DEFAULT) @@ -81,23 +95,28 @@ public void testPrePostSystemIndexUpgrade_givenAlreadyInUpgradeMode() { ); Client client = mock(Client.class); - MachineLearning machineLearning = createMachineLearning(Settings.EMPTY); + try (MachineLearning machineLearning = createTrialLicensedMachineLearning(Settings.EMPTY)) { - SetOnce> response = new SetOnce<>(); - machineLearning.prepareForIndicesMigration(clusterService, client, ActionListener.wrap(response::set, e -> fail(e.getMessage()))); + SetOnce> response = new SetOnce<>(); + machineLearning.prepareForIndicesMigration( + clusterService, + client, + ActionListener.wrap(response::set, e -> fail(e.getMessage())) + ); - assertThat(response.get(), equalTo(Collections.singletonMap("already_in_upgrade_mode", true))); - verifyNoMoreInteractions(client); + assertThat(response.get(), equalTo(Collections.singletonMap("already_in_upgrade_mode", true))); + verifyNoMoreInteractions(client); - machineLearning.indicesMigrationComplete( - response.get(), - clusterService, - client, - ActionListener.wrap(ESTestCase::assertTrue, e -> fail(e.getMessage())) - ); + machineLearning.indicesMigrationComplete( + response.get(), + clusterService, + client, + ActionListener.wrap(ESTestCase::assertTrue, e -> fail(e.getMessage())) + ); - // Neither pre nor post should have called any action - verifyNoMoreInteractions(client); + // Neither pre nor post should have called any action + verifyNoMoreInteractions(client); + } } public void testMaxOpenWorkersSetting_givenDefault() { @@ -141,7 +160,7 @@ public void testMaxMachineMemoryPercent_givenInvalidSetting() { ); } - public void testNoAttributes_givenNoClash() { + public void testNoAttributes_givenNoClash() throws IOException { Settings.Builder builder = Settings.builder(); if (randomBoolean()) { builder.put("xpack.ml.enabled", randomBoolean()); @@ -151,11 +170,12 @@ public void testNoAttributes_givenNoClash() { } builder.put("node.attr.foo", "abc"); builder.put("node.attr.ml.bar", "def"); - MachineLearning machineLearning = createMachineLearning(builder.put("path.home", createTempDir()).build()); - assertNotNull(machineLearning.additionalSettings()); + try (MachineLearning machineLearning = createTrialLicensedMachineLearning(builder.put("path.home", createTempDir()).build())) { + assertNotNull(machineLearning.additionalSettings()); + } } - public void testNoAttributes_givenSameAndMlEnabled() { + public void testNoAttributes_givenSameAndMlEnabled() throws IOException { Settings.Builder builder = Settings.builder(); if (randomBoolean()) { builder.put("xpack.ml.enabled", randomBoolean()); @@ -164,33 +184,43 @@ public void testNoAttributes_givenSameAndMlEnabled() { int 
maxOpenJobs = randomIntBetween(5, 15); builder.put("xpack.ml.max_open_jobs", maxOpenJobs); } - MachineLearning machineLearning = createMachineLearning(builder.put("path.home", createTempDir()).build()); - assertNotNull(machineLearning.additionalSettings()); + try (MachineLearning machineLearning = createTrialLicensedMachineLearning(builder.put("path.home", createTempDir()).build())) { + assertNotNull(machineLearning.additionalSettings()); + } } - public void testNoAttributes_givenClash() { + public void testNoAttributes_givenClash() throws IOException { Settings.Builder builder = Settings.builder(); builder.put("node.attr.ml.max_open_jobs", randomIntBetween(13, 15)); - MachineLearning machineLearning = createMachineLearning(builder.put("path.home", createTempDir()).build()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, machineLearning::additionalSettings); - assertThat(e.getMessage(), startsWith("Directly setting [node.attr.ml.")); - assertThat( - e.getMessage(), - containsString( - "] is not permitted - " - + "it is reserved for machine learning. If your intention was to customize machine learning, set the [xpack.ml." - ) - ); + try (MachineLearning machineLearning = createTrialLicensedMachineLearning(builder.put("path.home", createTempDir()).build())) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, machineLearning::additionalSettings); + assertThat(e.getMessage(), startsWith("Directly setting [node.attr.ml.")); + assertThat( + e.getMessage(), + containsString( + "] is not permitted - " + + "it is reserved for machine learning. If your intention was to customize machine learning, set the [xpack.ml." + ) + ); + } } - private MachineLearning createMachineLearning(Settings settings) { - XPackLicenseState licenseState = mock(XPackLicenseState.class); + public static class TrialLicensedMachineLearning extends MachineLearning { + + // A license state constructed like this is considered a trial license + XPackLicenseState licenseState = new XPackLicenseState(() -> 0L); + + public TrialLicensedMachineLearning(Settings settings) { + super(settings); + } + + @Override + protected XPackLicenseState getLicenseState() { + return licenseState; + } + } - return new MachineLearning(settings) { - @Override - protected XPackLicenseState getLicenseState() { - return licenseState; - } - }; + public static MachineLearning createTrialLicensedMachineLearning(Settings settings) { + return new TrialLicensedMachineLearning(settings); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilderTests.java index 3467992d7b6b6..0f819ec9516e1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilderTests.java @@ -9,11 +9,11 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MachineLearningTests; import org.elasticsearch.xpack.ml.job.config.CategorizationAnalyzerConfigTests; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -21,7 
+21,7 @@ public class CategorizeTextAggregationBuilderTests extends BaseAggregationTestCa @Override protected Collection> getExtraPlugins() { - return Collections.singletonList(MachineLearning.class); + return List.of(MachineLearningTests.TrialLicensedMachineLearning.class); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilderTests.java index fb0080661017c..2db1149edf80d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilderTests.java @@ -10,15 +10,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MachineLearningTests; -import java.util.Collections; import java.util.List; public class ChangePointAggregationBuilderTests extends BasePipelineAggregationTestCase { @Override protected List plugins() { - return Collections.singletonList(new MachineLearning(Settings.EMPTY)); + return List.of(MachineLearningTests.createTrialLicensedMachineLearning(Settings.EMPTY)); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilderTests.java index 2595a085183b6..9e9b5c52f16f0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilderTests.java @@ -16,9 +16,8 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MachineLearningTests; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -31,7 +30,7 @@ public class BucketCorrelationAggregationBuilderTests extends BasePipelineAggreg @Override protected List plugins() { - return Collections.singletonList(new MachineLearning(Settings.EMPTY)); + return List.of(MachineLearningTests.createTrialLicensedMachineLearning(Settings.EMPTY)); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsTests.java index 80bd982371132..068947857e934 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsTests.java @@ -9,10 +9,10 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MachineLearningTests; import java.util.Collection; -import 
java.util.Collections; +import java.util.List; import static org.elasticsearch.xpack.ml.aggs.frequentitemsets.FrequentItemSetsAggregationBuilderTests.randomFrequentItemsSetsAggregationBuilder; @@ -20,7 +20,7 @@ public class FrequentItemSetsTests extends BaseAggregationTestCase> getExtraPlugins() { - return Collections.singletonList(MachineLearning.class); + return List.of(MachineLearningTests.TrialLicensedMachineLearning.class); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilderTests.java index 06d48d79d163b..01b12ed404f07 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilderTests.java @@ -15,9 +15,8 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MachineLearningTests; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -32,7 +31,7 @@ public class BucketCountKSTestAggregationBuilderTests extends BasePipelineAggreg @Override protected List plugins() { - return Collections.singletonList(new MachineLearning(Settings.EMPTY)); + return List.of(MachineLearningTests.createTrialLicensedMachineLearning(Settings.EMPTY)); } @Override From 9dd47d8a925c641f061d7fc1652bbd2e5012efd3 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 9 Aug 2022 13:49:55 +0200 Subject: [PATCH 144/265] Account for `null` metadata in update API key test (#89195) When the metadata field on the raw API key doc is null, the GET API automatically translates it to an empty map. This PR fixes a failing test by accounting for this difference in a test assertion. 
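Because the raw security index document may store metadata as null while the GET API reports it as an empty map, the assertion has to treat those two representations as equivalent whenever no metadata is expected. A minimal sketch of that tolerant check, using the Hamcrest matchers already imported by the test (the helper name assertMetadataMatches is illustrative only):

    // Sketch: a null metadata field on the raw doc is equivalent to the empty map
    // that the GET API returns when no metadata was set on the key.
    private static void assertMetadataMatches(Map<String, Object> expected, Map<String, Object> actualFromRawDoc) {
        if (expected.isEmpty()) {
            assertThat(actualFromRawDoc, anyOf(nullValue(), anEmptyMap()));
        } else {
            assertThat(actualFromRawDoc, equalTo(expected));
        }
    }
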
Closes #89193 --- .../xpack/security/authc/ApiKeyIntegTests.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index b5fec28365bba..a593616b9ba9a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -128,6 +128,7 @@ import static org.elasticsearch.xpack.security.Security.SECURITY_CRYPTO_THREAD_POOL_NAME; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -2343,7 +2344,13 @@ private void expectMetadataForApiKey(final Map expectedMetadata, assertNotNull(actualRawApiKeyDoc); @SuppressWarnings("unchecked") final var actualMetadata = (Map) actualRawApiKeyDoc.get("metadata_flattened"); - assertThat("for api key doc " + actualRawApiKeyDoc, actualMetadata, equalTo(expectedMetadata)); + // Internally, metadata may be stored as `null`. However, it is always exposed as an empty map through the API. We define + // `expectedMetadata` as the expected value according to the API, so we need to account for this discrepancy here + if (expectedMetadata.isEmpty()) { + assertThat("for api key doc " + actualRawApiKeyDoc, actualMetadata, anyOf(nullValue(), anEmptyMap())); + } else { + assertThat("for api key doc " + actualRawApiKeyDoc, actualMetadata, equalTo(expectedMetadata)); + } } private void expectCreatorForApiKey(final Map expectedCreator, final Map actualRawApiKeyDoc) { From c9d4892929080246af7bf0ab0dfda9ffdec2eee2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 9 Aug 2022 13:15:37 +0100 Subject: [PATCH 145/265] Weaken language about "low-latency" networks (#89198) Today we say that voting-only nodes require a "low-latency" network. This term has a specific meaning in some operating environments which is different from our intended meaning. To avoid this confusion this commit removes the absolute term "low-latency" in favour of describing the requirements relative to the user's own performance goals. --- .../high-availability/cluster-design.asciidoc | 16 ++-- .../modules/discovery/publishing.asciidoc | 75 +++++++++++-------- docs/reference/modules/node.asciidoc | 21 ++++-- 3 files changed, 66 insertions(+), 46 deletions(-) diff --git a/docs/reference/high-availability/cluster-design.asciidoc b/docs/reference/high-availability/cluster-design.asciidoc index 919e026bbeea2..bddc8c206a7af 100644 --- a/docs/reference/high-availability/cluster-design.asciidoc +++ b/docs/reference/high-availability/cluster-design.asciidoc @@ -338,12 +338,16 @@ You should use <> to ensure that there is a copy of each shard in each zone. This means either zone remains fully available if the other zone fails. -All master-eligible nodes, including voting-only nodes, are on the critical path -for publishing cluster state updates. Because of this, these nodes require -reasonably fast persistent storage and a reliable, low-latency network -connection to the rest of the cluster. 
If you add a tiebreaker node in a third -independent zone then you must make sure it has adequate resources and good -connectivity to the rest of the cluster. +All master-eligible nodes, including voting-only nodes, are on the critical +path for <>. Cluster +state updates are usually independent of performance-critical workloads such as +indexing or searches, but they are involved in management activities such as +index creation and rollover, mapping updates, and recovery after a failure. The +performance characteristics of these activities are a function of the speed of +the storage on each master-eligible node, as well as the reliability and +latency of the network interconnections between all nodes in the cluster. You +must therefore ensure that the storage and networking available to the +nodes in your cluster are good enough to meet your performance goals. [[high-availability-cluster-design-three-zones]] ==== Clusters with three or more zones diff --git a/docs/reference/modules/discovery/publishing.asciidoc b/docs/reference/modules/discovery/publishing.asciidoc index 208386946d3fb..af664585085c2 100644 --- a/docs/reference/modules/discovery/publishing.asciidoc +++ b/docs/reference/modules/discovery/publishing.asciidoc @@ -1,38 +1,40 @@ [[cluster-state-publishing]] === Publishing the cluster state -The master node is the only node in a cluster that can make changes to the -cluster state. The master node processes one batch of cluster state updates at -a time, computing the required changes and publishing the updated cluster state -to all the other nodes in the cluster. Each publication starts with the master -broadcasting the updated cluster state to all nodes in the cluster. Each node -responds with an acknowledgement but does not yet apply the newly-received -state. Once the master has collected acknowledgements from enough -master-eligible nodes, the new cluster state is said to be _committed_ and the -master broadcasts another message instructing nodes to apply the now-committed -state. Each node receives this message, applies the updated state, and then -sends a second acknowledgement back to the master. +The elected master node is the only node in a cluster that can make changes to +the cluster state. The elected master node processes one batch of cluster state +updates at a time, computing the required changes and publishing the updated +cluster state to all the other nodes in the cluster. Each publication starts +with the elected master broadcasting the updated cluster state to all nodes in +the cluster. Each node responds with an acknowledgement but does not yet apply +the newly-received state. Once the elected master has collected +acknowledgements from enough master-eligible nodes, the new cluster state is +said to be _committed_ and the master broadcasts another message instructing +nodes to apply the now-committed state. Each node receives this message, +applies the updated state, and then sends a second acknowledgement back to the +master. -The master allows a limited amount of time for each cluster state update to be -completely published to all nodes. It is defined by the +The elected master allows a limited amount of time for each cluster state +update to be completely published to all nodes. It is defined by the `cluster.publish.timeout` setting, which defaults to `30s`, measured from the time the publication started. 
If this time is reached before the new cluster -state is committed then the cluster state change is rejected and the master -considers itself to have failed. It stands down and starts trying to elect a -new master. +state is committed then the cluster state change is rejected and the elected +master considers itself to have failed. It stands down and starts trying to +elect a new master node. If the new cluster state is committed before `cluster.publish.timeout` has -elapsed, the master node considers the change to have succeeded. It waits until -the timeout has elapsed or until it has received acknowledgements that each -node in the cluster has applied the updated state, and then starts processing -and publishing the next cluster state update. If some acknowledgements have not -been received (i.e. some nodes have not yet confirmed that they have applied -the current update), these nodes are said to be _lagging_ since their cluster -states have fallen behind the master's latest state. The master waits for the -lagging nodes to catch up for a further time, `cluster.follower_lag.timeout`, -which defaults to `90s`. If a node has still not successfully applied the -cluster state update within this time then it is considered to have failed and -is removed from the cluster. +elapsed, the elected master node considers the change to have succeeded. It +waits until the timeout has elapsed or until it has received acknowledgements +that each node in the cluster has applied the updated state, and then starts +processing and publishing the next cluster state update. If some +acknowledgements have not been received (i.e. some nodes have not yet confirmed +that they have applied the current update), these nodes are said to be +_lagging_ since their cluster states have fallen behind the elected master's +latest state. The elected master waits for the lagging nodes to catch up for a +further time, `cluster.follower_lag.timeout`, which defaults to `90s`. If a +node has still not successfully applied the cluster state update within this +time then it is considered to have failed and the elected master removes it +from the cluster. Cluster state updates are typically published as diffs to the previous cluster state, which reduces the time and network bandwidth needed to publish a cluster @@ -40,12 +42,19 @@ state update. For example, when updating the mappings for only a subset of the indices in the cluster state, only the updates for those indices need to be published to the nodes in the cluster, as long as those nodes have the previous cluster state. If a node is missing the previous cluster state, for example -when rejoining a cluster, the master will publish the full cluster state to -that node so that it can receive future updates as diffs. +when rejoining a cluster, the elected master will publish the full cluster +state to that node so that it can receive future updates as diffs. NOTE: {es} is a peer to peer based system, in which nodes communicate with one another directly. The high-throughput APIs (index, delete, search) do not -normally interact with the master node. The responsibility of the master node -is to maintain the global cluster state and reassign shards when nodes join or -leave the cluster. Each time the cluster state is changed, the new state is -published to all nodes in the cluster as described above. +normally interact with the elected master node. 
The responsibility of the +elected master node is to maintain the global cluster state which includes +reassigning shards when nodes join or leave the cluster. Each time the cluster +state is changed, the new state is published to all nodes in the cluster as +described above. + +The performance characteristics of cluster state updates are a function of the +speed of the storage on each master-eligible node, as well as the reliability +and latency of the network interconnections between all nodes in the cluster. +You must therefore ensure that the storage and networking available to the +nodes in your cluster are good enough to meet your performance goals. diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index b4a28b07db900..372af89af4a8a 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -194,13 +194,6 @@ High availability (HA) clusters require at least three master-eligible nodes, at least two of which are not voting-only nodes. Such a cluster will be able to elect a master node even if one of the nodes fails. -Since voting-only nodes never act as the cluster's elected master, they may -require less heap and a less powerful CPU than the true master nodes. -However all master-eligible nodes, including voting-only nodes, require -reasonably fast persistent storage and a reliable and low-latency network -connection to the rest of the cluster, since they are on the critical path for -<>. - Voting-only master-eligible nodes may also fill other roles in your cluster. For instance, a node may be both a data node and a voting-only master-eligible node. A _dedicated_ voting-only master-eligible nodes is a voting-only @@ -212,6 +205,20 @@ dedicated voting-only master-eligible node, set: node.roles: [ master, voting_only ] ------------------- +Since dedicated voting-only nodes never act as the cluster's elected master, +they may require less heap and a less powerful CPU than the true master nodes. +However all master-eligible nodes, including voting-only nodes, are on the +critical path for <>. Cluster state updates are usually independent of +performance-critical workloads such as indexing or searches, but they are +involved in management activities such as index creation and rollover, mapping +updates, and recovery after a failure. The performance characteristics of these +activities are a function of the speed of the storage on each master-eligible +node, as well as the reliability and latency of the network interconnections +between the elected master node and the other nodes in the cluster. You must +therefore ensure that the storage and networking available to the nodes in your +cluster are good enough to meet your performance goals. + [[data-node]] ==== Data node From 08fb6ed6923fec089b2eb3eff2a39276491937f6 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 9 Aug 2022 15:19:05 +0300 Subject: [PATCH 146/265] [ML] Extract downscale to zero logic into its own method (#89197) This commit extracts the logic that downscales to zero when the ML autoscaling decider detects there are no ML tasks at all. 
--- .../MlAutoscalingDeciderService.java | 73 +++++++++++-------- 1 file changed, 41 insertions(+), 32 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index 00c8424405349..bf76d20ab5ad8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -376,39 +376,10 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider } // We don't need to check anything as there are no tasks - // This is a quick path to downscale. - // simply return `0` for scale down if delay is satisfied if (mlContext.isEmpty()) { - // We might be in a need zero, have zero situation, in which case it's nicer to pass a "no change" explanation - if (currentScale.getTierMlNativeMemoryRequirementExcludingOverhead() == 0 - && currentScale.getNodeMlNativeMemoryRequirementExcludingOverhead() == 0) { - return new AutoscalingDeciderResult( - context.currentCapacity(), - reasonBuilder.setSimpleReason("Passing currently perceived capacity as no scaling changes are necessary").build() - ); - } - long msLeftToScale = msLeftToDownScale(configuration); - if (msLeftToScale > 0) { - return new AutoscalingDeciderResult( - context.currentCapacity(), - reasonBuilder.setSimpleReason( - String.format( - Locale.ROOT, - "Passing currently perceived capacity as down scale delay has not been satisfied; configured delay [%s] " - + "last detected scale down event [%s]. Will request scale down in approximately [%s]", - DOWN_SCALE_DELAY.get(configuration).getStringRep(), - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(scaleDownDetected)), - TimeValue.timeValueMillis(msLeftToScale).getStringRep() - ) - ).build() - ); - } - return new AutoscalingDeciderResult( - AutoscalingCapacity.ZERO, - reasonBuilder.setRequiredCapacity(AutoscalingCapacity.ZERO) - .setSimpleReason("Requesting scale down as tier and/or node size could be smaller") - .build() - ); + // This is a quick path to downscale. + // simply return `0` for scale down if delay is satisfied + return downscaleToZero(configuration, context, currentScale, reasonBuilder); } // This is the sole check for memory staleness. 
It's possible that memory becomes stale while we execute the rest @@ -614,6 +585,44 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider ); } + private AutoscalingDeciderResult downscaleToZero( + Settings configuration, + AutoscalingDeciderContext context, + NativeMemoryCapacity currentScale, + MlScalingReason.Builder reasonBuilder + ) { + // We might be in a need zero, have zero situation, in which case it's nicer to pass a "no change" explanation + if (currentScale.getTierMlNativeMemoryRequirementExcludingOverhead() == 0 + && currentScale.getNodeMlNativeMemoryRequirementExcludingOverhead() == 0) { + return new AutoscalingDeciderResult( + context.currentCapacity(), + reasonBuilder.setSimpleReason("Passing currently perceived capacity as no scaling changes are necessary").build() + ); + } + long msLeftToScale = msLeftToDownScale(configuration); + if (msLeftToScale > 0) { + return new AutoscalingDeciderResult( + context.currentCapacity(), + reasonBuilder.setSimpleReason( + String.format( + Locale.ROOT, + "Passing currently perceived capacity as down scale delay has not been satisfied; configured delay [%s] " + + "last detected scale down event [%s]. Will request scale down in approximately [%s]", + DOWN_SCALE_DELAY.get(configuration).getStringRep(), + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(scaleDownDetected)), + TimeValue.timeValueMillis(msLeftToScale).getStringRep() + ) + ).build() + ); + } + return new AutoscalingDeciderResult( + AutoscalingCapacity.ZERO, + reasonBuilder.setRequiredCapacity(AutoscalingCapacity.ZERO) + .setSimpleReason("Requesting scale down as tier and/or node size could be smaller") + .build() + ); + } + private long maxMemoryBytes(MlAutoscalingContext mlContext) { long maxMemoryBytes = Math.max( mlContext.anomalyDetectionTasks.stream() From cd359b3d3993c034d8f4fe4296afcea2247cf8c4 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 9 Aug 2022 15:07:22 +0200 Subject: [PATCH 147/265] geo_line aggregation returns a geojson point when the resulting line has only one point (#89199) This commit changes the geojson output to return a point instead in the cases a geo_line aggregation only contains one point. 
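For example (coordinate values invented for the example), a bucket whose line still contains two or more points keeps returning

    { "type": "LineString", "coordinates": [ [ -71.34, 41.12 ], [ -71.30, 41.15 ] ] }

while a bucket whose line collapses to a single point now returns a valid GeoJSON Point instead of a single-coordinate LineString:

    { "type": "Point", "coordinates": [ -71.34, 41.12 ] }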
--- docs/changelog/89199.yaml | 7 ++++ .../search/aggregations/InternalGeoLine.java | 9 +++-- .../aggregations/GeoLineAggregatorTests.java | 34 +++++++++++++++++++ 3 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/89199.yaml diff --git a/docs/changelog/89199.yaml b/docs/changelog/89199.yaml new file mode 100644 index 0000000000000..3f3ca6d01707f --- /dev/null +++ b/docs/changelog/89199.yaml @@ -0,0 +1,7 @@ +pr: 89199 +summary: Geo_line aggregation returns a geojson point when the resulting line has + only one point +area: Geo +type: bug +issues: + - 85748 diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java index 67205a3d1d26e..89cbeef0a55f1 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java @@ -227,8 +227,13 @@ public Map geoJSONGeometry() { ); } final Map geoJSON = new HashMap<>(); - geoJSON.put("type", "LineString"); - geoJSON.put("coordinates", coordinates.toArray()); + if (coordinates.size() == 1) { + geoJSON.put("type", "Point"); + geoJSON.put("coordinates", coordinates.get(0)); + } else { + geoJSON.put("type", "LineString"); + geoJSON.put("coordinates", coordinates.toArray()); + } return geoJSON; } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java index ad8d47781a4aa..fb19a481a69d2 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java @@ -263,6 +263,40 @@ public void testEmpty() throws IOException { testCase(new MatchAllDocsQuery(), aggregationBuilder, iw -> {}, terms -> { assertTrue(terms.getBuckets().isEmpty()); }); } + public void testOnePoint() throws IOException { + int size = randomIntBetween(1, GeoLineAggregationBuilder.MAX_PATH_SIZE); + MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build(); + MultiValuesSourceFieldConfig sortConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("sort_field").build(); + GeoLineAggregationBuilder lineAggregationBuilder = new GeoLineAggregationBuilder("_name").point(valueConfig) + .sortOrder(SortOrder.ASC) + .sort(sortConfig) + .size(size); + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").field("group_id") + .subAggregation(lineAggregationBuilder); + double lon = GeoEncodingUtils.decodeLongitude(randomInt()); + double lat = GeoEncodingUtils.decodeLatitude(randomInt()); + testCase(new MatchAllDocsQuery(), aggregationBuilder, iw -> { + iw.addDocument( + Arrays.asList( + new LatLonDocValuesField("value_field", lat, lon), + new SortedNumericDocValuesField("sort_field", NumericUtils.doubleToSortableLong(randomDouble())), + new SortedDocValuesField("group_id", new BytesRef("groupOrd")) + ) + ); + }, terms -> { + assertEquals(1, terms.getBuckets().size()); + InternalGeoLine geoLine = terms.getBuckets().get(0).getAggregations().get("_name"); + 
assertNotNull(geoLine); + Map geojson = geoLine.geoJSONGeometry(); + assertEquals("Point", geojson.get("type")); + assertTrue(geojson.get("coordinates") instanceof double[]); + double[] coordinates = (double[]) geojson.get("coordinates"); + assertEquals(2, coordinates.length); + assertEquals(lon, coordinates[0], 1e-6); + assertEquals(lat, coordinates[1], 1e-6); + }); + } + private void testAggregator(SortOrder sortOrder) throws IOException { int size = randomIntBetween(1, GeoLineAggregationBuilder.MAX_PATH_SIZE); MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build(); From e63bcb550e16f9dbbcf0a55fea194ef554b169a5 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 9 Aug 2022 08:47:29 -0500 Subject: [PATCH 148/265] Fixing internal action names (#89182) Fixing the names of the internal actions used by CoordinationDiagnosticsService to begin with "internal:" so that they can be used in the system context with security enabled. --- docs/changelog/89182.yaml | 5 +++++ .../cluster/coordination/ClusterFormationInfoAction.java | 2 +- .../cluster/coordination/CoordinationDiagnosticsAction.java | 2 +- .../admin/cluster/coordination/MasterHistoryAction.java | 2 +- .../cluster/coordination/MasterHistoryService.java | 2 +- .../elasticsearch/xpack/security/operator/Constants.java | 6 +++--- 6 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/89182.yaml diff --git a/docs/changelog/89182.yaml b/docs/changelog/89182.yaml new file mode 100644 index 0000000000000..3189da9a8107f --- /dev/null +++ b/docs/changelog/89182.yaml @@ -0,0 +1,5 @@ +pr: 89182 +summary: Fixing internal action names +area: Health +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java index 66e3383bb6dd6..cde7aecd7ed21 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java @@ -33,7 +33,7 @@ public class ClusterFormationInfoAction extends ActionType { public static final ClusterFormationInfoAction INSTANCE = new ClusterFormationInfoAction(); - public static final String NAME = "cluster:internal/formation/info"; + public static final String NAME = "internal:cluster/formation/info"; private ClusterFormationInfoAction() { super(NAME, ClusterFormationInfoAction.Response::new); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java index 913003c446d5a..6cde5080e6710 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java @@ -34,7 +34,7 @@ public class CoordinationDiagnosticsAction extends ActionType { public static final CoordinationDiagnosticsAction INSTANCE = new CoordinationDiagnosticsAction(); - public static final String NAME = "cluster:internal/coordination_diagnostics/info"; + public static final String NAME = "internal:cluster/coordination_diagnostics/info"; private CoordinationDiagnosticsAction() { super(NAME, 
CoordinationDiagnosticsAction.Response::new); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java index 5a1fa58d4a852..d9e5a3e251629 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java @@ -34,7 +34,7 @@ public class MasterHistoryAction extends ActionType { public static final MasterHistoryAction INSTANCE = new MasterHistoryAction(); - public static final String NAME = "cluster:internal/master_history/get"; + public static final String NAME = "internal:cluster/master_history/get"; private MasterHistoryAction() { super(NAME, MasterHistoryAction.Response::new); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java index f16b5bf9135f3..38bf270595d45 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java @@ -114,7 +114,7 @@ public List getRemoteMasterHistory() throws Exception { * @param node The node whose view of the master history we want to fetch */ public void refreshRemoteMasterHistory(DiscoveryNode node) { - Version minSupportedVersion = Version.V_8_3_0; + Version minSupportedVersion = Version.V_8_4_0; if (node.getVersion().onOrAfter(minSupportedVersion)) { // This was introduced in 8.3.0 logger.trace( "Cannot get master history for {} because it is at version {} and {} is required", diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index c6c7d538f8118..14d81525a55a8 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -247,9 +247,6 @@ public class Constants { "cluster:internal/xpack/ml/trained_models/cache/info", "cluster:internal/xpack/ml/trained_models/deployments/stats/get", "cluster:internal/xpack/transform/reset_mode", - "cluster:internal/master_history/get", - "cluster:internal/coordination_diagnostics/info", - "cluster:internal/formation/info", "cluster:monitor/allocation/explain", "cluster:monitor/async_search/status", "cluster:monitor/ccr/follow_info", @@ -483,6 +480,9 @@ public class Constants { "internal:admin/xpack/searchable_snapshots/frozen_cache_info", "internal:admin/xpack/searchable_snapshots/frozen_cache_info[n]", "internal:cluster/nodes/indices/shard/store", + "internal:cluster/master_history/get", + "internal:cluster/coordination_diagnostics/info", + "internal:cluster/formation/info", "internal:gateway/local/started_shards" ); } From 5233229bfb29660e253246794b148634400b011a Mon Sep 17 00:00:00 2001 From: Julien Lind Date: Tue, 9 Aug 2022 15:54:35 +0200 Subject: [PATCH 149/265] Update CODEOWNERS (#89155) --- .github/CODEOWNERS | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 
1f9ace96bd3ee..7a95e4eaef1b1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -15,3 +15,7 @@ x-pack/plugin/core/src/main/resources/monitoring-logstash-mb.json @elastic/infra x-pack/plugin/core/src/main/resources/monitoring-logstash.json @elastic/infra-monitoring-ui x-pack/plugin/core/src/main/resources/monitoring-mb-ilm-policy.json @elastic/infra-monitoring-ui x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @elastic/infra-monitoring-ui + +# Elastic Agent +x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet @elastic/elastic-agent-control-plane +x-pack/plugin/core/src/main/resources/fleet-* @elastic/elastic-agent-control-plane From 7b615ac2deebeea07f8f88f486f1b7b8ffb4d7eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Tue, 9 Aug 2022 16:10:31 +0200 Subject: [PATCH 150/265] Fix ReactiveStorageIT#testScaleDuringSplitOrClone (#88607) Sometimes the autoscaling decider returns an empty response when the service does not have enough information to provide an autoscaling decision, i.e. when a new node joins it tries to fetch the new node memory info and this might take a while. This commits adds a busy assertion to ensure that a valid autoscaling capacity is provided eventually. Closes #88478 --- .../xpack/autoscaling/storage/ReactiveStorageIT.java | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index c1e775e569138..709262918b18c 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -41,6 +41,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class ReactiveStorageIT extends AutoscalingStorageIntegTestCase { @@ -368,7 +369,6 @@ public void testScaleWhileShrinking() throws Exception { ensureGreen(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/88478") public void testScaleDuringSplitOrClone() throws Exception { internalCluster().startMasterOnlyNode(); final String dataNode1Name = internalCluster().startDataOnlyNode(); @@ -404,6 +404,14 @@ public void testScaleDuringSplitOrClone() throws Exception { setTotalSpace(dataNode1Name, enoughSpace); setTotalSpace(dataNode2Name, enoughSpace); + // It might take a while until the autoscaling polls the node information of dataNode2 and + // provides a complete autoscaling capacity response + assertBusy(() -> { + GetAutoscalingCapacityAction.Response response = capacity(); + assertThat(response.results().keySet(), equalTo(Set.of(policyName))); + assertThat(response.results().get(policyName).requiredCapacity(), is(notNullValue())); + }); + // validate initial state looks good GetAutoscalingCapacityAction.Response response = capacity(); assertThat(response.results().keySet(), equalTo(Set.of(policyName))); From 6a91f9702dcd21e3a5568aa6213ba229fc1905ea Mon Sep 17 00:00:00 2001 
From: Kevin Lacabane Date: Tue, 9 Aug 2022 16:21:40 +0200 Subject: [PATCH 151/265] [Stack Monitoring] Cleanup unused mappings properties (#88899) * cleanup unused mappings properties * remove root version property --- .../src/main/resources/monitoring-es-mb.json | 217 ------------------ .../main/resources/monitoring-kibana-mb.json | 210 ----------------- .../resources/monitoring-logstash-mb.json | 202 ---------------- .../MonitoringTemplateRegistry.java | 2 +- 4 files changed, 1 insertion(+), 630 deletions(-) diff --git a/x-pack/plugin/core/src/main/resources/monitoring-es-mb.json b/x-pack/plugin/core/src/main/resources/monitoring-es-mb.json index 2c2d50f403796..5f73606c62f25 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-es-mb.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-es-mb.json @@ -2118,126 +2118,6 @@ "type": { "ignore_above": 1024, "type": "keyword" - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "hostname": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "origin": { - "properties": { - "address": { - "type": "keyword", - "ignore_above": 1024 - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "name": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "version": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "target": { - "properties": { - "address": { - "type": "keyword", - "ignore_above": 1024 - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "name": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "version": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "version": { - "type": "keyword", - "ignore_above": 1024 } } }, @@ -2445,9 +2325,6 @@ } } }, - "version": { - "type": "long" - }, "ccr_auto_follow_stats": { "properties": { "number_of_failed_remote_cluster_state_requests": { @@ -2973,14 +2850,6 @@ "name": { "type": "keyword", "ignore_above": 1024 - }, - "architecture": { - "type": "keyword", - "ignore_above": 1024 - }, - "hostname": { - "type": "keyword", - "ignore_above": 1024 } } }, @@ -2997,25 +2866,6 @@ }, "event": { "properties": { - "action": { - "type": "keyword", - "ignore_above": 1024 - }, - "agent_id_status": { - "type": "keyword", - "ignore_above": 1024 - }, - "category": { - "type": "keyword", - "ignore_above": 1024 - }, - "code": { - "type": "keyword", - "ignore_above": 1024 - }, - "created": { - "type": "date" - }, "dataset": { "type": "keyword", "ignore_above": 1024 @@ -3023,76 +2873,9 @@ "duration": { "type": "long" }, - "end": { - "type": "date" - }, - 
"hash": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "ingested": { - "type": "date" - }, - "kind": { - "type": "keyword", - "ignore_above": 1024 - }, "module": { "type": "keyword", "ignore_above": 1024 - }, - "original": { - "type": "keyword", - "index": false, - "doc_values": false, - "ignore_above": 1024 - }, - "outcome": { - "type": "keyword", - "ignore_above": 1024 - }, - "provider": { - "type": "keyword", - "ignore_above": 1024 - }, - "reason": { - "type": "keyword", - "ignore_above": 1024 - }, - "reference": { - "type": "keyword", - "ignore_above": 1024 - }, - "risk_score": { - "type": "float" - }, - "risk_score_norm": { - "type": "float" - }, - "sequence": { - "type": "long" - }, - "severity": { - "type": "long" - }, - "start": { - "type": "date" - }, - "timezone": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "url": { - "type": "keyword", - "ignore_above": 1024 } } }, diff --git a/x-pack/plugin/core/src/main/resources/monitoring-kibana-mb.json b/x-pack/plugin/core/src/main/resources/monitoring-kibana-mb.json index 9e6262435de41..ff8abc5d89959 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-kibana-mb.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-kibana-mb.json @@ -29,10 +29,6 @@ "ignore_above": 1024, "type": "keyword" }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, "id": { "ignore_above": 1024, "type": "keyword" @@ -44,118 +40,6 @@ "version": { "ignore_above": 1024, "type": "keyword" - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "hostname": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "origin": { - "properties": { - "address": { - "type": "keyword", - "ignore_above": 1024 - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "name": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "version": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "target": { - "properties": { - "address": { - "type": "keyword", - "ignore_above": 1024 - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "name": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "version": { - "type": "keyword", - "ignore_above": 1024 - } - } } } }, @@ -587,14 +471,6 @@ "name": { "type": "keyword", "ignore_above": 1024 - }, - "architecture": { - "type": "keyword", - "ignore_above": 1024 - }, - "hostname": { - "type": "keyword", - "ignore_above": 1024 } } }, @@ -611,25 +487,6 @@ }, "event": { "properties": { - "action": { - "type": "keyword", 
- "ignore_above": 1024 - }, - "agent_id_status": { - "type": "keyword", - "ignore_above": 1024 - }, - "category": { - "type": "keyword", - "ignore_above": 1024 - }, - "code": { - "type": "keyword", - "ignore_above": 1024 - }, - "created": { - "type": "date" - }, "dataset": { "type": "keyword", "ignore_above": 1024 @@ -637,76 +494,9 @@ "duration": { "type": "long" }, - "end": { - "type": "date" - }, - "hash": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "ingested": { - "type": "date" - }, - "kind": { - "type": "keyword", - "ignore_above": 1024 - }, "module": { "type": "keyword", "ignore_above": 1024 - }, - "original": { - "type": "keyword", - "index": false, - "doc_values": false, - "ignore_above": 1024 - }, - "outcome": { - "type": "keyword", - "ignore_above": 1024 - }, - "provider": { - "type": "keyword", - "ignore_above": 1024 - }, - "reason": { - "type": "keyword", - "ignore_above": 1024 - }, - "reference": { - "type": "keyword", - "ignore_above": 1024 - }, - "risk_score": { - "type": "float" - }, - "risk_score_norm": { - "type": "float" - }, - "sequence": { - "type": "long" - }, - "severity": { - "type": "long" - }, - "start": { - "type": "date" - }, - "timezone": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "url": { - "type": "keyword", - "ignore_above": 1024 } } }, diff --git a/x-pack/plugin/core/src/main/resources/monitoring-logstash-mb.json b/x-pack/plugin/core/src/main/resources/monitoring-logstash-mb.json index f8172dae66899..89a9b6e273397 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-logstash-mb.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-logstash-mb.json @@ -504,130 +504,14 @@ "version": { "ignore_above": 1024, "type": "keyword" - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "origin": { - "properties": { - "address": { - "type": "keyword", - "ignore_above": 1024 - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "name": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "version": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "target": { - "properties": { - "address": { - "type": "keyword", - "ignore_above": 1024 - }, - "environment": { - "type": "keyword", - "ignore_above": 1024 - }, - "ephemeral_id": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "name": { - "type": "keyword", - "ignore_above": 1024 - }, - "node": { - "properties": { - "name": { - "type": "keyword", - "ignore_above": 1024 - } - } - }, - "state": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "version": { - "type": "keyword", - "ignore_above": 1024 - } - } } } }, "host": { "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, "name": { "type": 
"keyword", "ignore_above": 1024 - }, - "architecture": { - "type": "keyword", - "ignore_above": 1024 } } }, @@ -648,25 +532,6 @@ }, "event": { "properties": { - "action": { - "type": "keyword", - "ignore_above": 1024 - }, - "agent_id_status": { - "type": "keyword", - "ignore_above": 1024 - }, - "category": { - "type": "keyword", - "ignore_above": 1024 - }, - "code": { - "type": "keyword", - "ignore_above": 1024 - }, - "created": { - "type": "date" - }, "dataset": { "type": "keyword", "ignore_above": 1024 @@ -674,76 +539,9 @@ "duration": { "type": "long" }, - "end": { - "type": "date" - }, - "hash": { - "type": "keyword", - "ignore_above": 1024 - }, - "id": { - "type": "keyword", - "ignore_above": 1024 - }, - "ingested": { - "type": "date" - }, - "kind": { - "type": "keyword", - "ignore_above": 1024 - }, "module": { "type": "keyword", "ignore_above": 1024 - }, - "original": { - "type": "keyword", - "index": false, - "doc_values": false, - "ignore_above": 1024 - }, - "outcome": { - "type": "keyword", - "ignore_above": 1024 - }, - "provider": { - "type": "keyword", - "ignore_above": 1024 - }, - "reason": { - "type": "keyword", - "ignore_above": 1024 - }, - "reference": { - "type": "keyword", - "ignore_above": 1024 - }, - "risk_score": { - "type": "float" - }, - "risk_score_norm": { - "type": "float" - }, - "sequence": { - "type": "long" - }, - "severity": { - "type": "long" - }, - "start": { - "type": "date" - }, - "timezone": { - "type": "keyword", - "ignore_above": 1024 - }, - "type": { - "type": "keyword", - "ignore_above": 1024 - }, - "url": { - "type": "keyword", - "ignore_above": 1024 } } }, diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index d6dc0bb836151..b488f13cb4c54 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -78,7 +78,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. 
*/ - public static final int STACK_MONITORING_REGISTRY_VERSION = Version.V_8_0_0.id + 3; + public static final int STACK_MONITORING_REGISTRY_VERSION = Version.V_8_0_0.id + 4; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; From de281b50724aa5b4b853519ededa83a0b2ee30d2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 9 Aug 2022 16:40:38 +0100 Subject: [PATCH 152/265] Complete listener in ReservedStateErrorTaskExecutor (#89191) --- .../service/ReservedStateErrorTaskExecutor.java | 7 +++---- .../service/ReservedClusterStateServiceTests.java | 7 ++++++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java index 5a3d70668855b..ea37daf87ba66 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java @@ -27,10 +27,9 @@ record ReservedStateErrorTaskExecutor() implements ClusterStateTaskExecutor> taskContexts) { for (final var taskContext : taskContexts) { - currentState = taskContext.getTask().execute(currentState); - taskContext.success( - () -> taskContext.getTask().listener().delegateFailure((l, s) -> l.onResponse(ActionResponse.Empty.INSTANCE)) - ); + final var task = taskContext.getTask(); + currentState = task.execute(currentState); + taskContext.success(() -> task.listener().onResponse(ActionResponse.Empty.INSTANCE)); } return currentState; } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index 478ca01f2de96..27ae0157cd121 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -191,12 +191,16 @@ public void onFailure(Exception failure) {} public void testErrorStateTask() throws Exception { ClusterState state = ClusterState.builder(new ClusterName("test")).build(); + final var listenerCompleted = new AtomicBoolean(false); + ReservedStateErrorTask task = spy( new ReservedStateErrorTask( new ErrorState("test", 1L, List.of("some parse error", "some io error"), ReservedStateErrorMetadata.ErrorKind.PARSING), new ActionListener<>() { @Override - public void onResponse(ActionResponse.Empty empty) {} + public void onResponse(ActionResponse.Empty empty) { + listenerCompleted.set(true); + } @Override public void onFailure(Exception e) {} @@ -241,6 +245,7 @@ public void onFailure(Exception failure) {} assertEquals(1L, (long) operatorMetadata.errorMetadata().version()); assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, operatorMetadata.errorMetadata().errorKind()); assertThat(operatorMetadata.errorMetadata().errors(), contains("some parse error", "some io error")); + assertTrue(listenerCompleted.get()); } public void testUpdateTaskDuplicateError() { From 264f09f3d571d1311eaa1aee2af22723a61736a1 Mon Sep 17 00:00:00 2001 From: Stuart Tettemer Date: Tue, 9 Aug 2022 12:31:18 -0500 
Subject: [PATCH 153/265] Script: Common base class for write scripts (#89141) Adds `WriteScript` as the common base class for the write scripts: `IngestScript`, `UpdateScript`, `UpdateByQueryScript` and `ReindexScript`. This pulls the common `getCtx()` and `metadata()` methods into the base class and prepares for the implementation of the ingest fields api (https://github.com/elastic/elasticsearch/issues/79155). As part of the refactor, `IngestScript` now takes a `CtxMap` directly rather than taking "sourceAndMetadata" (`CtxMap`) and `Metadata` (from `CtxMap`). There is a new `getCtxMap()` getter to get the typed `CtxMap`. `getSourceAndMetadata` could have been refactored to do this, but most of the callers of that don't need to know about `CtxMap` and are happy with a `Map`. --- .../ingest/common/ScriptProcessor.java | 2 +- .../common/ScriptProcessorFactoryTests.java | 7 ++-- .../elasticsearch/ingest/IngestDocument.java | 40 +++++++++++-------- .../elasticsearch/script/IngestScript.java | 25 ++---------- .../elasticsearch/script/ReindexScript.java | 17 +------- .../script/UpdateByQueryScript.java | 17 +------- .../elasticsearch/script/UpdateScript.java | 16 +------- .../org/elasticsearch/script/WriteScript.java | 33 +++++++++++++++ .../script/MockScriptEngine.java | 2 +- .../ml/integration/MlNativeIntegTestCase.java | 2 +- .../xpack/ml/MlSingleNodeTestCase.java | 2 +- .../xpack/ml/support/BaseMlIntegTestCase.java | 2 +- 12 files changed, 75 insertions(+), 90 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/script/WriteScript.java diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index fb538a0b6b264..84e66a3134b69 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -75,7 +75,7 @@ public IngestDocument execute(IngestDocument document) { if (factory == null) { factory = scriptService.compile(script, IngestScript.CONTEXT); } - factory.newInstance(script.getParams(), document.getMetadata(), document.getSourceAndMetadata()).execute(); + factory.newInstance(script.getParams(), document.getCtxMap()).execute(); return document; } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java index 7476eb2216dc6..4a8a01218b52a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java @@ -10,8 +10,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.TestIngestDocument; +import org.elasticsearch.script.CtxMap; import org.elasticsearch.script.IngestScript; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; @@ -158,9 +158,8 @@ public void testInlineIsCompiled() throws Exception { assertThat(processor.getScript().getType(), equalTo(ScriptType.INLINE)); assertThat(processor.getScript().getParams(), equalTo(Collections.emptyMap())); assertNotNull(processor.getPrecompiledIngestScriptFactory()); - IngestDocument doc = 
TestIngestDocument.emptyIngestDocument(); - Map ctx = TestIngestDocument.emptyIngestDocument().getSourceAndMetadata(); - processor.getPrecompiledIngestScriptFactory().newInstance(null, doc.getMetadata(), ctx).execute(); + CtxMap ctx = TestIngestDocument.emptyIngestDocument().getCtxMap(); + processor.getPrecompiledIngestScriptFactory().newInstance(null, ctx).execute(); assertThat(ctx.get("foo"), equalTo("bar")); } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 3a3c2349aea25..6c18062e8d4f2 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -18,6 +18,7 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.script.CtxMap; import org.elasticsearch.script.TemplateScript; import java.time.ZoneOffset; @@ -48,7 +49,7 @@ public final class IngestDocument { static final String TIMESTAMP = "timestamp"; - private final IngestCtxMap sourceAndMetadata; + private final IngestCtxMap ctxMap; private final Map ingestMetadata; // Contains all pipelines that have been executed for this document @@ -57,9 +58,9 @@ public final class IngestDocument { private boolean doNoSelfReferencesCheck = false; public IngestDocument(String index, String id, long version, String routing, VersionType versionType, Map source) { - this.sourceAndMetadata = new IngestCtxMap(index, id, version, routing, versionType, ZonedDateTime.now(ZoneOffset.UTC), source); + this.ctxMap = new IngestCtxMap(index, id, version, routing, versionType, ZonedDateTime.now(ZoneOffset.UTC), source); this.ingestMetadata = new HashMap<>(); - this.ingestMetadata.put(TIMESTAMP, sourceAndMetadata.getMetadata().getNow()); + this.ingestMetadata.put(TIMESTAMP, ctxMap.getMetadata().getNow()); } /** @@ -67,7 +68,7 @@ public IngestDocument(String index, String id, long version, String routing, Ver */ public IngestDocument(IngestDocument other) { this( - new IngestCtxMap(deepCopyMap(other.sourceAndMetadata.getSource()), other.sourceAndMetadata.getMetadata().clone()), + new IngestCtxMap(deepCopyMap(other.ctxMap.getSource()), other.ctxMap.getMetadata().clone()), deepCopyMap(other.ingestMetadata) ); } @@ -91,14 +92,14 @@ public IngestDocument(Map sourceAndMetadata, Map } } this.ingestMetadata = new HashMap<>(ingestMetadata); - this.sourceAndMetadata = new IngestCtxMap(source, new IngestDocMetadata(metadata, IngestCtxMap.getTimestamp(ingestMetadata))); + this.ctxMap = new IngestCtxMap(source, new IngestDocMetadata(metadata, IngestCtxMap.getTimestamp(ingestMetadata))); } /** * Constructor to create an IngestDocument from its constituent maps */ - IngestDocument(IngestCtxMap sourceAndMetadata, Map ingestMetadata) { - this.sourceAndMetadata = sourceAndMetadata; + IngestDocument(IngestCtxMap ctxMap, Map ingestMetadata) { + this.ctxMap = ctxMap; this.ingestMetadata = ingestMetadata; } @@ -702,8 +703,8 @@ public String renderTemplate(TemplateScript.Factory template) { private Map createTemplateModel() { return new LazyMap<>(() -> { - Map model = new HashMap<>(sourceAndMetadata); - model.put(SourceFieldMapper.NAME, sourceAndMetadata); + Map model = new HashMap<>(ctxMap); + model.put(SourceFieldMapper.NAME, ctxMap); // If there is a field in the source with the name '_ingest' it gets overwritten here, // if access to that field is 
required then it get accessed via '_source._ingest' model.put(INGEST_KEY, ingestMetadata); @@ -715,21 +716,28 @@ private Map createTemplateModel() { * Get source and metadata map */ public Map getSourceAndMetadata() { - return sourceAndMetadata; + return ctxMap; + } + + /** + * Get the CtxMap + */ + public CtxMap getCtxMap() { + return ctxMap; } /** * Get the strongly typed metadata */ public org.elasticsearch.script.Metadata getMetadata() { - return sourceAndMetadata.getMetadata(); + return ctxMap.getMetadata(); } /** * Get all source values in a Map */ public Map getSource() { - return sourceAndMetadata.getSource(); + return ctxMap.getSource(); } /** @@ -873,17 +881,17 @@ public boolean equals(Object obj) { } IngestDocument other = (IngestDocument) obj; - return Objects.equals(sourceAndMetadata, other.sourceAndMetadata) && Objects.equals(ingestMetadata, other.ingestMetadata); + return Objects.equals(ctxMap, other.ctxMap) && Objects.equals(ingestMetadata, other.ingestMetadata); } @Override public int hashCode() { - return Objects.hash(sourceAndMetadata, ingestMetadata); + return Objects.hash(ctxMap, ingestMetadata); } @Override public String toString() { - return "IngestDocument{" + " sourceAndMetadata=" + sourceAndMetadata + ", ingestMetadata=" + ingestMetadata + '}'; + return "IngestDocument{" + " sourceAndMetadata=" + ctxMap + ", ingestMetadata=" + ingestMetadata + '}'; } public enum Metadata { @@ -930,7 +938,7 @@ private FieldPath(String path) { initialContext = ingestMetadata; newPath = path.substring(INGEST_KEY_PREFIX.length(), path.length()); } else { - initialContext = sourceAndMetadata; + initialContext = ctxMap; if (path.startsWith(SOURCE_PREFIX)) { newPath = path.substring(SOURCE_PREFIX.length(), path.length()); } else { diff --git a/server/src/main/java/org/elasticsearch/script/IngestScript.java b/server/src/main/java/org/elasticsearch/script/IngestScript.java index 87a026caf5361..261575dd456e1 100644 --- a/server/src/main/java/org/elasticsearch/script/IngestScript.java +++ b/server/src/main/java/org/elasticsearch/script/IngestScript.java @@ -16,7 +16,7 @@ /** * A script used by the Ingest Script Processor. */ -public abstract class IngestScript { +public abstract class IngestScript extends WriteScript { public static final String[] PARAMETERS = {}; @@ -33,16 +33,9 @@ public abstract class IngestScript { /** The generic runtime parameters for the script. */ private final Map params; - /** The metadata available to the script */ - private final Metadata metadata; - - /** The metadata and source available to the script */ - private final Map ctx; - - public IngestScript(Map params, Metadata metadata, Map ctx) { + public IngestScript(Map params, CtxMap ctxMap) { + super(ctxMap); this.params = params; - this.metadata = metadata; - this.ctx = ctx; } /** Return the parameters for this script. 
*/ @@ -50,19 +43,9 @@ public Map getParams() { return params; } - /** Provides backwards compatibility access to ctx */ - public Map getCtx() { - return ctx; - } - - /** Return the ingest metadata object */ - public Metadata metadata() { - return metadata; - } - public abstract void execute(); public interface Factory { - IngestScript newInstance(Map params, Metadata metadata, Map ctx); + IngestScript newInstance(Map params, CtxMap ctx); } } diff --git a/server/src/main/java/org/elasticsearch/script/ReindexScript.java b/server/src/main/java/org/elasticsearch/script/ReindexScript.java index 3498d4ad15714..b5f412ad02ac0 100644 --- a/server/src/main/java/org/elasticsearch/script/ReindexScript.java +++ b/server/src/main/java/org/elasticsearch/script/ReindexScript.java @@ -14,7 +14,7 @@ /** * A script used in the reindex api */ -public abstract class ReindexScript { +public abstract class ReindexScript extends WriteScript { public static final String[] PARAMETERS = {}; @@ -24,9 +24,6 @@ public abstract class ReindexScript { /** The generic runtime parameters for the script. */ private final Map params; - /** The context map for the script */ - private final CtxMap ctxMap; - /** * Metadata available to the script * _index can't be null @@ -34,8 +31,8 @@ public abstract class ReindexScript { * op must be 'noop', 'index' or 'delete' */ public ReindexScript(Map params, CtxMap ctxMap) { + super(ctxMap); this.params = params; - this.ctxMap = ctxMap; } /** Return the parameters for this script. */ @@ -43,16 +40,6 @@ public Map getParams() { return params; } - /** Return the context map for this script */ - public Map getCtx() { - return ctxMap; - } - - /** Return the update metadata for this script */ - public Metadata metadata() { - return ctxMap.getMetadata(); - } - public abstract void execute(); public interface Factory { diff --git a/server/src/main/java/org/elasticsearch/script/UpdateByQueryScript.java b/server/src/main/java/org/elasticsearch/script/UpdateByQueryScript.java index f8275d496d614..af93d4dfeaea5 100644 --- a/server/src/main/java/org/elasticsearch/script/UpdateByQueryScript.java +++ b/server/src/main/java/org/elasticsearch/script/UpdateByQueryScript.java @@ -14,7 +14,7 @@ /** * A script used by the update by query api */ -public abstract class UpdateByQueryScript { +public abstract class UpdateByQueryScript extends WriteScript { public static final String[] PARAMETERS = {}; @@ -24,12 +24,9 @@ public abstract class UpdateByQueryScript { /** The generic runtime parameters for the script. */ private final Map params; - /** The context map for the script */ - private final CtxMap ctxMap; - public UpdateByQueryScript(Map params, CtxMap ctxMap) { + super(ctxMap); this.params = params; - this.ctxMap = ctxMap; } /** Return the parameters for this script. 
*/ @@ -37,16 +34,6 @@ public Map getParams() { return params; } - /** Return the context map for this script */ - public Map getCtx() { - return ctxMap; - } - - /** Return the update metadata for this script */ - public Metadata metadata() { - return ctxMap.getMetadata(); - } - public abstract void execute(); public interface Factory { diff --git a/server/src/main/java/org/elasticsearch/script/UpdateScript.java b/server/src/main/java/org/elasticsearch/script/UpdateScript.java index 19be8f0742fdb..d3186b5f28c2f 100644 --- a/server/src/main/java/org/elasticsearch/script/UpdateScript.java +++ b/server/src/main/java/org/elasticsearch/script/UpdateScript.java @@ -14,7 +14,7 @@ /** * A script used in the update API */ -public abstract class UpdateScript { +public abstract class UpdateScript extends WriteScript { public static final String[] PARAMETERS = {}; @@ -24,11 +24,9 @@ public abstract class UpdateScript { /** The generic runtime parameters for the script. */ private final Map params; - private final UpdateCtxMap ctxMap; - public UpdateScript(Map params, UpdateCtxMap ctxMap) { + super(ctxMap); this.params = params; - this.ctxMap = ctxMap; } /** Return the parameters for this script. */ @@ -36,16 +34,6 @@ public Map getParams() { return params; } - /** Return the update context for this script. */ - public Map getCtx() { - return ctxMap; - } - - /** Return the update metadata for this script */ - public Metadata metadata() { - return ctxMap.getMetadata(); - } - public abstract void execute(); public interface Factory { diff --git a/server/src/main/java/org/elasticsearch/script/WriteScript.java b/server/src/main/java/org/elasticsearch/script/WriteScript.java new file mode 100644 index 0000000000000..e268c0e0c28af --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/WriteScript.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script; + +import java.util.Map; + +/** + * Abstract base class for scripts that write documents. + * These scripts provide {@code ctx} for backwards compatibility and expose {@link Metadata}. 
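+ *
+ * A minimal sketch (illustrative only; the subclass name is made up) of how a concrete subclass
+ * is expected to build on this class:
+ * <pre>{@code
+ * public class MyWriteScript extends WriteScript {
+ *     public MyWriteScript(CtxMap ctxMap) {
+ *         super(ctxMap);
+ *     }
+ *
+ *     public void execute() {
+ *         Map ctx = getCtx();             // backwards compatible ctx view
+ *         Metadata metadata = metadata(); // strongly typed metadata
+ *     }
+ * }
+ * }</pre>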
+ */ +public abstract class WriteScript { + protected final CtxMap ctxMap; + + public WriteScript(CtxMap ctxMap) { + this.ctxMap = ctxMap; + } + + /** Provides backwards compatibility access to ctx */ + public Map getCtx() { + return ctxMap; + } + + /** Return the metadata for this script */ + public Metadata metadata() { + return ctxMap.getMetadata(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 0b9f7f8972620..7996f3b36fde7 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -148,7 +148,7 @@ public boolean needs_score() { } else if (context.instanceClazz.equals(BytesRefSortScript.class)) { return context.factoryClazz.cast(new MockBytesRefSortScriptFactory(script)); } else if (context.instanceClazz.equals(IngestScript.class)) { - IngestScript.Factory factory = (parameters, metadata, ctx) -> new IngestScript(parameters, metadata, ctx) { + IngestScript.Factory factory = (parameters, ctx) -> new IngestScript(parameters, ctx) { @Override public void execute() { script.apply(ctx); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index b3928cae4dc90..905cb0e0146ec 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -440,7 +440,7 @@ public T compile(String name, String script, ScriptContext context, Map 0.0))); } if (context.name.equals("ingest")) { - IngestScript.Factory factory = (params, metadata, ctx) -> new IngestScript(params, metadata, ctx) { + IngestScript.Factory factory = (params, ctx) -> new IngestScript(params, ctx) { @Override public void execute() {} }; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index 8749714d971fe..17000e1cd94fe 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -200,7 +200,7 @@ public T compile(String name, String script, ScriptContext context, Map 0.0))); } if (context.name.equals("ingest")) { - IngestScript.Factory factory = (vars, metadata, ctx) -> new IngestScript(vars, metadata, ctx) { + IngestScript.Factory factory = (vars, ctx) -> new IngestScript(vars, ctx) { @Override public void execute() {} }; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index d2bb3daa13f07..0de920ed76dbe 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -554,7 +554,7 @@ public T compile(String name, String script, ScriptContext context, Map 0.0))); } if (context.name.equals("ingest")) { - 
IngestScript.Factory factory = (vars, metadata, ctx) -> new IngestScript(vars, metadata, ctx) { + IngestScript.Factory factory = (vars, ctx) -> new IngestScript(vars, ctx) { @Override public void execute() {} }; From 895baf011cc1a5cbc4645a1841080ec214d17cbb Mon Sep 17 00:00:00 2001 From: Nikola Grcevski <6207777+grcevski@users.noreply.github.com> Date: Tue, 9 Aug 2022 17:11:55 -0400 Subject: [PATCH 154/265] Delete invalid settings for system indices (#88903) --- docs/changelog/88903.yaml | 5 ++ .../metadata/IndexMetadataVerifier.java | 59 ++++++++++++++----- .../settings/AbstractScopedSettings.java | 47 +++++++++++++++ .../metadata/IndexMetadataVerifierTests.java | 56 ++++++++++++++++-- 4 files changed, 145 insertions(+), 22 deletions(-) create mode 100644 docs/changelog/88903.yaml diff --git a/docs/changelog/88903.yaml b/docs/changelog/88903.yaml new file mode 100644 index 0000000000000..fa6bf77afe1de --- /dev/null +++ b/docs/changelog/88903.yaml @@ -0,0 +1,5 @@ +pr: 88903 +summary: Delete invalid settings for system indices +area: Infra/Core +type: bug +issues: [88324] diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java index 39a2754a17ecb..7c03e97d58ba1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java @@ -89,7 +89,7 @@ public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, Version mi newMetadata = removeTierFiltering(newMetadata); // Next we have to run this otherwise if we try to create IndexSettings // with broken settings it would fail in checkMappingsCompatibility - newMetadata = archiveBrokenIndexSettings(newMetadata); + newMetadata = archiveOrDeleteBrokenIndexSettings(newMetadata); checkMappingsCompatibility(newMetadata); return newMetadata; } @@ -205,27 +205,54 @@ public Set> entrySet() { /** * Identify invalid or unknown index settings and archive them. This leniency allows Elasticsearch to load * indices even if they contain old settings that are no longer valid. + * + * When we find an invalid setting on a system index, we simply remove it instead of archiving. System indices + * are managed by Elasticsearch and manual modification of settings is limited and sometimes impossible. 
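+ *
+ * For example (illustrative setting name): an unknown key such as {@code index.unknown_setting: foo}
+ * on a regular index is preserved as {@code archived.index.unknown_setting: foo}, while on a system
+ * index the key is dropped from the returned metadata altogether.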
*/ - IndexMetadata archiveBrokenIndexSettings(IndexMetadata indexMetadata) { + IndexMetadata archiveOrDeleteBrokenIndexSettings(IndexMetadata indexMetadata) { final Settings settings = indexMetadata.getSettings(); - final Settings newSettings = indexScopedSettings.archiveUnknownOrInvalidSettings( - settings, - e -> logger.warn( - "{} ignoring unknown index setting: [{}] with value [{}]; archiving", - indexMetadata.getIndex(), - e.getKey(), - e.getValue() - ), - (e, ex) -> logger.warn( - () -> format( - "%s ignoring invalid index setting: [%s] with value [%s]; archiving", + final Settings newSettings; + + if (indexMetadata.isSystem()) { + newSettings = indexScopedSettings.deleteUnknownOrInvalidSettings( + settings, + e -> logger.warn( + "{} deleting unknown system index setting: [{}] with value [{}]", indexMetadata.getIndex(), e.getKey(), e.getValue() ), - ex - ) - ); + (e, ex) -> logger.warn( + () -> format( + "%s deleting invalid system index setting: [%s] with value [%s]", + indexMetadata.getIndex(), + e.getKey(), + e.getValue() + ), + ex + ) + ); + } else { + newSettings = indexScopedSettings.archiveUnknownOrInvalidSettings( + settings, + e -> logger.warn( + "{} ignoring unknown index setting: [{}] with value [{}]; archiving", + indexMetadata.getIndex(), + e.getKey(), + e.getValue() + ), + (e, ex) -> logger.warn( + () -> format( + "%s ignoring invalid index setting: [%s] with value [%s]; archiving", + indexMetadata.getIndex(), + e.getKey(), + e.getValue() + ), + ex + ) + ); + } + if (newSettings != settings) { return IndexMetadata.builder(indexMetadata).settings(newSettings).build(); } else { diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 6319c949e6b6d..4631641ecb119 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -956,6 +956,53 @@ public Settings archiveUnknownOrInvalidSettings( } } + /** + * Deletes invalid or unknown settings. Any setting that is not recognized or fails validation + * will be deleted. This behaviour is desired when dealing with unknown index settings on + * system indices. 
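+ *
+ * A minimal usage sketch (assuming an {@code IndexScopedSettings} instance and a logger are in scope):
+ * <pre>{@code
+ * Settings cleaned = indexScopedSettings.deleteUnknownOrInvalidSettings(
+ *     brokenSettings,
+ *     unknown -> logger.warn("dropping unknown setting [{}]", unknown.getKey()),
+ *     (invalid, ex) -> logger.warn("dropping invalid setting [{}]", invalid.getKey(), ex)
+ * );
+ * }</pre>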
+ * + * @param settings the {@link Settings} instance to scan for unknown or invalid settings + * @param unknownConsumer callback on unknown settings (consumer receives unknown key and its + * associated value) + * @param invalidConsumer callback on invalid settings (consumer receives invalid key, its + * associated value and an exception) + * @return a {@link Settings} instance with the unknown or invalid settings removed + */ + public Settings deleteUnknownOrInvalidSettings( + final Settings settings, + final Consumer> unknownConsumer, + final BiConsumer, IllegalArgumentException> invalidConsumer + ) { + Settings.Builder builder = Settings.builder(); + boolean changed = false; + for (String key : settings.keySet()) { + try { + Setting setting = get(key); + if (setting != null) { + // will throw IllegalArgumentException on invalid setting + setting.get(settings); + builder.copy(key, settings); + } else { + if (isPrivateSetting(key)) { + // will throw IllegalArgumentException on invalid setting + builder.copy(key, settings); + } else { + changed = true; + unknownConsumer.accept(new Entry(key, settings)); + } + } + } catch (IllegalArgumentException ex) { + changed = true; + invalidConsumer.accept(new Entry(key, settings), ex); + } + } + if (changed) { + return builder.build(); + } else { + return settings; + } + } + private record Entry(String key, Settings settings) implements Map.Entry { @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index ad55c53bed5f3..e4a7bb60aca15 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -26,26 +26,61 @@ public class IndexMetadataVerifierTests extends ESTestCase { public void testArchiveBrokenIndexSettings() { IndexMetadataVerifier service = getIndexMetadataVerifier(); IndexMetadata src = newIndexMeta("foo", Settings.EMPTY); - IndexMetadata indexMetadata = service.archiveBrokenIndexSettings(src); + IndexMetadata indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertSame(indexMetadata, src); src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); - indexMetadata = service.archiveBrokenIndexSettings(src); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertNotSame(indexMetadata, src); assertEquals("-200", indexMetadata.getSettings().get("archived.index.refresh_interval")); src = newIndexMeta("foo", Settings.builder().put("index.codec", "best_compression1").build()); - indexMetadata = service.archiveBrokenIndexSettings(src); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertNotSame(indexMetadata, src); assertEquals("best_compression1", indexMetadata.getSettings().get("archived.index.codec")); src = newIndexMeta("foo", Settings.builder().put("index.refresh.interval", "-1").build()); - indexMetadata = service.archiveBrokenIndexSettings(src); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertNotSame(indexMetadata, src); assertEquals("-1", indexMetadata.getSettings().get("archived.index.refresh.interval")); src = newIndexMeta("foo", indexMetadata.getSettings()); // double archive? 
- indexMetadata = service.archiveBrokenIndexSettings(src); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertSame(indexMetadata, src); + } + + public void testDeleteBrokenSystemIndexSettings() { + IndexMetadataVerifier service = getIndexMetadataVerifier(); + IndexMetadata src = newSystemIndexMeta("foo", Settings.EMPTY); + IndexMetadata indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertSame(indexMetadata, src); + + src = newSystemIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.refresh_interval")); + assertNull(indexMetadata.getSettings().get("index.refresh_interval")); + + // previously archived settings are removed + src = newSystemIndexMeta("foo", Settings.builder().put("archived.index.refresh_interval", "200").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.refresh_interval")); + + src = newSystemIndexMeta("foo", Settings.builder().put("index.codec", "best_compression1").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.codec")); + assertNull(indexMetadata.getSettings().get("index.codec")); + + src = newSystemIndexMeta("foo", Settings.builder().put("index.refresh.interval", "-1").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.refresh.interval")); + assertNull(indexMetadata.getSettings().get("index.refresh.interval")); + + src = newSystemIndexMeta("foo", indexMetadata.getSettings()); // double archive? + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertSame(indexMetadata, src); } @@ -108,6 +143,14 @@ private IndexMetadataVerifier getIndexMetadataVerifier() { } public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { + return newIndexMetaBuilder(name, indexSettings).build(); + } + + public static IndexMetadata newSystemIndexMeta(String name, Settings indexSettings) { + return newIndexMetaBuilder(name, indexSettings).system(true).build(); + } + + private static IndexMetadata.Builder newIndexMetaBuilder(String name, Settings indexSettings) { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, randomIndexCompatibleVersion(random())) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, between(0, 5)) @@ -120,6 +163,7 @@ public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { if (randomBoolean()) { indexMetadataBuilder.state(IndexMetadata.State.CLOSE); } - return indexMetadataBuilder.build(); + return indexMetadataBuilder; } + } From d663231a83bf456c41f6a3fdc9a2952ac4a7877e Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 10 Aug 2022 11:21:38 +1000 Subject: [PATCH 155/265] User Profile - GetProfile API nows supports multiple UIDs (#89023) This PR expands the existing GetProfile API to support getting multiple profiles by IDs. As a result, the response format is also changed to align with the latest version of API design guideline. 
Concretely, this means moving the profiles into an array under a top level
"profiles" field so that (1) it does not mix dynamic fields (uid) with static
fields and (2) it enforces an order in the response, which is desirable for
clients. The change also reports any errors encountered during retrieval in a
top level "errors" field.

Relates: #81910
---
 docs/changelog/89023.yaml | 5 +
 .../api/security.get_user_profile.json | 6 +-
 .../security/get-user-profile.asciidoc | 133 ++++++++-------
 .../update-user-profile-data.asciidoc | 58 +++----
 .../xpack/core/common/ResultsAndErrors.java | 33 ++++
 .../apikey/BulkUpdateApiKeyResponse.java | 17 +-
 .../action/profile/GetProfileRequest.java | 53 ------
 ...fileAction.java => GetProfilesAction.java} | 6 +-
 .../action/profile/GetProfilesRequest.java | 69 ++++++++
 .../action/profile/GetProfilesResponse.java | 45 +++--
 .../authz/store/ReservedRolesStore.java | 4 +-
 .../core/security/xcontent/XContentUtils.java | 19 +++
 .../profile/GetProfilesResponseTests.java | 155 ++++++++++++++++++
 .../WriteProfileDataPrivilegesTests.java | 4 +-
 .../authz/store/ReservedRolesStoreTests.java | 24 +--
 .../xpack/security/profile/ProfileIT.java | 126 ++++++++++----
 .../profile/AbstractProfileIntegTestCase.java | 12 +-
 .../security/profile/ProfileIntegTests.java | 57 ++++++-
 .../xpack/security/Security.java | 10 +-
 ...n.java => TransportGetProfilesAction.java} | 20 ++-
 .../security/profile/ProfileService.java | 111 ++++++++-----
 ...Action.java => RestGetProfilesAction.java} | 17 +-
 .../security/profile/ProfileServiceTests.java | 138 ++++++++++------
 .../xpack/security/test/SecurityMocks.java | 80 +++++++++
 .../test/user_profile/10_basic.yml | 106 ++++++++++--
 .../test/mixed_cluster/140_user_profile.yml | 9 +-
 26 files changed, 951 insertions(+), 366 deletions(-)
 create mode 100644 docs/changelog/89023.yaml
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/ResultsAndErrors.java
 delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfileRequest.java
 rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/{GetProfileAction.java => GetProfilesAction.java} (73%)
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java
 create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponseTests.java
 rename x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/{TransportGetProfileAction.java => TransportGetProfilesAction.java} (57%)
 rename x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/{RestGetProfileAction.java => RestGetProfilesAction.java} (70%)

diff --git a/docs/changelog/89023.yaml b/docs/changelog/89023.yaml
new file mode 100644
index 0000000000000..e5258b48a6f92
--- /dev/null
+++ b/docs/changelog/89023.yaml
@@ -0,0 +1,5 @@
+pr: 89023
+summary: User Profile - `GetProfile` API nows supports multiple UIDs
+area: Security
+type: enhancement
+issues: []
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_profile.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_profile.json
index addce3c1b651f..0e14a802c260a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_profile.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_profile.json
@@ -2,7 +2,7 @@
"security.get_user_profile":{ "documentation":{ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html", - "description":"Retrieves user profile for the given unique ID." + "description":"Retrieves user profiles for the given unique ID(s)." }, "stability":"experimental", "visibility":"private", @@ -18,8 +18,8 @@ ], "parts":{ "uid":{ - "type":"string", - "description":"An unique identifier of the user profile" + "type":"list", + "description":"A comma-separated list of unique identifier for user profiles" } } } diff --git a/x-pack/docs/en/rest-api/security/get-user-profile.asciidoc b/x-pack/docs/en/rest-api/security/get-user-profile.asciidoc index 7e1730f58ada5..91966a7137e74 100644 --- a/x-pack/docs/en/rest-api/security/get-user-profile.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-user-profile.asciidoc @@ -1,13 +1,13 @@ [role="xpack"] [[security-api-get-user-profile]] -=== Get user profile API +=== Get user profiles API ++++ -Get user profile +Get user profiles ++++ beta::[] -Retrieves a user's profile using the unique profile ID. +Retrieves user profiles using a list of unique profile ID. [[security-api-get-user-profile-request]] ==== {api-request-title} @@ -31,7 +31,8 @@ The get user profile API returns the user profile document matching a specified ==== {api-path-parms-title} `uid`:: -(Required, string) A unique identifier for the user profile. +(Required, string) The unique identifier for the user profile. You can specify multiple IDs as +a comma-separated list. [[security-api-get-user-profile-query-params]] ==== {api-query-parms-title} @@ -65,33 +66,35 @@ The API returns the following response for a `uid` matching `u_79HkWkwmnBH5gqFKw [source,console-result] ---- { - "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0": { - "uid": "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", - "enabled": true, - "last_synchronized": 1642650651037, - "user": { - "username": "jacknich", - "roles": [ - "admin", "other_role1" - ], - "realm_name": "native", - "full_name": "Jack Nicholson", - "email": "jacknich@example.com" - }, - "labels": { - "direction": "north" - }, - "data": {}, <1> - "_doc": { - "_primary_term": 88, - "_seq_no": 66 + "profiles": [ + { + "uid": "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", + "enabled": true, + "last_synchronized": 1642650651037, + "user": { + "username": "jacknich", + "roles": [ + "admin", "other_role1" + ], + "realm_name": "native", + "full_name": "Jack Nicholson", + "email": "jacknich@example.com" + }, + "labels": { + "direction": "north" + }, + "data": {}, <1> + "_doc": { + "_primary_term": 88, + "_seq_no": 66 + } } - } + ] } ---- -// TESTRESPONSE[s/1642650651037/$body.u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0.last_synchronized/] -// TESTRESPONSE[s/88/$body.u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0._doc._primary_term/] -// TESTRESPONSE[s/66/$body.u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0._doc._seq_no/] +// TESTRESPONSE[s/1642650651037/$body.profiles.0.last_synchronized/] +// TESTRESPONSE[s/88/$body.profiles.0._doc._primary_term/] +// TESTRESPONSE[s/66/$body.profiles.0._doc._seq_no/] <1> No content is returned in the `data` field by default. 
@@ -107,35 +110,55 @@ GET /_security/profile/u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0?data=app1 [source,console-result] ---- { - "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0": { - "uid": "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", - "enabled": true, - "last_synchronized": 1642650651037, - "user": { - "username": "jacknich", - "roles": [ - "admin", "other_role1" - ], - "realm_name": "native", - "full_name": "Jack Nicholson", - "email": "jacknich@example.com" - }, - "labels": { - "direction": "north" - }, - "data": { - "app1": { - "key1": "value1" + "profiles": [ + { + "uid": "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", + "enabled": true, + "last_synchronized": 1642650651037, + "user": { + "username": "jacknich", + "roles": [ + "admin", "other_role1" + ], + "realm_name": "native", + "full_name": "Jack Nicholson", + "email": "jacknich@example.com" + }, + "labels": { + "direction": "north" + }, + "data": { + "app1": { + "key1": "value1" + } + }, + "_doc": { + "_primary_term": 88, + "_seq_no": 66 } - }, - "_doc": { - "_primary_term": 88, - "_seq_no": 66 } - } + ] } ---- -// TESTRESPONSE[s/1642650651037/$body.u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0.last_synchronized/] -// TESTRESPONSE[s/88/$body.u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0._doc._primary_term/] -// TESTRESPONSE[s/66/$body.u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0._doc._seq_no/] +// TESTRESPONSE[s/1642650651037/$body.profiles.0.last_synchronized/] +// TESTRESPONSE[s/88/$body.profiles.0._doc._primary_term/] +// TESTRESPONSE[s/66/$body.profiles.0._doc._seq_no/] + +If there has been any errors when retrieving the user profiles, they are returned in the `errors` field: +[source,js] +-------------------------------------------------- +{ + "profiles": [], + "errors": { + "count": 1, + "details": { + "u_FmxQt3gr1BBH5wpnz9HkouPj3Q710XkOgg1PWkwLPBW_5": { + "type": "resource_not_found_exception", + "reason": "profile document not found" + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/x-pack/docs/en/rest-api/security/update-user-profile-data.asciidoc b/x-pack/docs/en/rest-api/security/update-user-profile-data.asciidoc index bf9698149b63d..a3b9f49f2f70b 100644 --- a/x-pack/docs/en/rest-api/security/update-user-profile-data.asciidoc +++ b/x-pack/docs/en/rest-api/security/update-user-profile-data.asciidoc @@ -137,35 +137,37 @@ GET /_security/profile/u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0?data=* [source,console-result] ---- { - "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0": { - "uid": "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", - "enabled": true, - "last_synchronized": 1642650651037, - "user": { - "username": "jackrea", - "roles": [ - "admin" - ], - "realm_name": "native", - "full_name": "Jack Reacher", - "email": "jackrea@example.com" - }, - "labels": { - "direction": "west" - }, - "data": { - "app1": { - "theme": "default", - "font": "large" + "profiles": [ + { + "uid": "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", + "enabled": true, + "last_synchronized": 1642650651037, + "user": { + "username": "jackrea", + "roles": [ + "admin" + ], + "realm_name": "native", + "full_name": "Jack Reacher", + "email": "jackrea@example.com" + }, + "labels": { + "direction": "west" + }, + "data": { + "app1": { + "theme": "default", + "font": "large" + } + }, + "_doc": { + "_primary_term": 88, + "_seq_no": 66 } - }, - "_doc": { - "_primary_term": 88, - "_seq_no": 66 } - } + ] } ---- -// 
TESTRESPONSE[s/1642650651037/$body.u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0.last_synchronized/] -// TESTRESPONSE[s/88/$body.u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0._doc._primary_term/] -// TESTRESPONSE[s/66/$body.u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0._doc._seq_no/] +// TESTRESPONSE[s/1642650651037/$body.profiles.0.last_synchronized/] +// TESTRESPONSE[s/88/$body.profiles.0._doc._primary_term/] +// TESTRESPONSE[s/66/$body.profiles.0._doc._seq_no/] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/ResultsAndErrors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/ResultsAndErrors.java new file mode 100644 index 0000000000000..855d6c9d0f640 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/ResultsAndErrors.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.common; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +/** + * A record class encapsulate a collection of results and associated errors. An intended usage is to model the + * generic MultiGetResponse to domain specific ones. The results are a collection of entity objects translated + * from the documents retrieved by MultiGet and the errors are a map key by IDs and any exception encountered + * when attempt retrieving associated documents. + */ +public record ResultsAndErrors (Collection results, Map errors) { + + private static final ResultsAndErrors EMPTY = new ResultsAndErrors<>(List.of(), Map.of()); + + public boolean isEmpty() { + return results.isEmpty() && errors.isEmpty(); + } + + @SuppressWarnings("unchecked") + public static ResultsAndErrors empty() { + return (ResultsAndErrors) EMPTY; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java index 7ea0e1fcba4a4..c6b9822b2ae75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java @@ -7,13 +7,13 @@ package org.elasticsearch.xpack.core.security.action.apikey; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; import java.util.ArrayList; @@ -59,20 +59,7 @@ public int getTotalResultCount() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject().stringListField("updated", updated).stringListField("noops", noops); - if (errorDetails.isEmpty() == false) { - builder.startObject("errors"); - { - builder.field("count", errorDetails.size()); - builder.startObject("details"); - for (Map.Entry idWithException : 
errorDetails.entrySet()) { - builder.startObject(idWithException.getKey()); - ElasticsearchException.generateThrowableXContent(builder, params, idWithException.getValue()); - builder.endObject(); - } - builder.endObject(); - } - builder.endObject(); - } + XContentUtils.maybeAddErrorDetails(builder, errorDetails); return builder.endObject(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfileRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfileRequest.java deleted file mode 100644 index 17e91b4b8c984..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfileRequest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.core.security.action.profile; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Set; - -public class GetProfileRequest extends ActionRequest { - - private final String uid; - private final Set dataKeys; - - public GetProfileRequest(String uid, Set dataKeys) { - this.uid = uid; - this.dataKeys = dataKeys; - } - - public GetProfileRequest(StreamInput in) throws IOException { - super(in); - this.uid = in.readString(); - this.dataKeys = in.readSet(StreamInput::readString); - } - - public String getUid() { - return uid; - } - - public Set getDataKeys() { - return dataKeys; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(uid); - out.writeStringCollection(dataKeys); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfileAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesAction.java similarity index 73% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfileAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesAction.java index f2648cf7aea39..2ba86a1559588 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfileAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesAction.java @@ -9,12 +9,12 @@ import org.elasticsearch.action.ActionType; -public class GetProfileAction extends ActionType { +public class GetProfilesAction extends ActionType { public static final String NAME = "cluster:admin/xpack/security/profile/get"; - public static final GetProfileAction INSTANCE = new GetProfileAction(); + public static final GetProfilesAction INSTANCE = new GetProfilesAction(); - public GetProfileAction() { + public GetProfilesAction() { super(NAME, GetProfilesResponse::new); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java new file mode 100644 index 0000000000000..8246c71ff2734 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.profile; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class GetProfilesRequest extends ActionRequest { + + private final List uids; + private final Set dataKeys; + + public GetProfilesRequest(List uids, Set dataKeys) { + this.uids = Objects.requireNonNull(uids, "profile UIDs cannot be null"); + this.dataKeys = Objects.requireNonNull(dataKeys, "data keys cannot be null"); + } + + public GetProfilesRequest(StreamInput in) throws IOException { + super(in); + this.uids = in.readStringList(); + this.dataKeys = in.readSet(StreamInput::readString); + } + + public GetProfilesRequest(String uid, Set dataKeys) { + this(List.of(Objects.requireNonNull(uid, "profile UID cannot be null")), dataKeys); + } + + public List getUids() { + return uids; + } + + public Set getDataKeys() { + return dataKeys; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringCollection(uids); + out.writeStringCollection(dataKeys); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (uids.isEmpty()) { + validationException = addValidationError("profile UIDs must be provided", validationException); + } + if (uids.stream().anyMatch(uid -> false == Strings.hasText(uid))) { + validationException = addValidationError("Profile UID cannot be empty", validationException); + } + return validationException; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java index 0bce941f59e8f..5dce557821110 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java @@ -10,48 +10,57 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.StatusToXContentObject; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; +import 
java.util.List; +import java.util.Map; +import java.util.Objects; -public class GetProfilesResponse extends ActionResponse implements StatusToXContentObject { +public class GetProfilesResponse extends ActionResponse implements ToXContentObject { - private final Profile[] profiles; + private final List profiles; + private final Map errors; - public GetProfilesResponse(@Nullable Profile profile) { - this.profiles = profile != null ? new Profile[] { profile } : new Profile[0]; + public GetProfilesResponse(List profiles, Map errors) { + this.profiles = Objects.requireNonNull(profiles); + this.errors = Objects.requireNonNull(errors); } public GetProfilesResponse(StreamInput in) throws IOException { super(in); - this.profiles = in.readArray(Profile::new, Profile[]::new); + this.profiles = in.readImmutableList(Profile::new); + this.errors = in.readMap(StreamInput::readString, StreamInput::readException); } - public Profile[] getProfiles() { + public List getProfiles() { return profiles; } + public Map getErrors() { + return errors; + } + @Override public void writeTo(StreamOutput out) throws IOException { - out.writeArray(profiles); + out.writeList(profiles); + out.writeMap(errors, StreamOutput::writeString, StreamOutput::writeException); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - for (Profile profile : profiles) { - builder.field(profile.uid()); - profile.toXContent(builder, params); + { + builder.startArray("profiles"); + for (Profile profile : profiles) { + profile.toXContent(builder, params); + } + builder.endArray(); + XContentUtils.maybeAddErrorDetails(builder, errors); } builder.endObject(); return builder; } - - @Override - public RestStatus status() { - return profiles.length > 0 ? 
RestStatus.OK : RestStatus.NOT_FOUND; - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index dccb3396c782b..e4f8b72e55efb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesAction; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesAction; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -667,7 +667,7 @@ public static RoleDescriptor kibanaSystemRoleDescriptor(String name) { "manage_own_api_key", GetBuiltinPrivilegesAction.NAME, "delegate_pki", - GetProfileAction.NAME, + GetProfilesAction.NAME, ActivateProfileAction.NAME, SuggestProfilesAction.NAME, ProfileHasPrivilegesAction.NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/xcontent/XContentUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/xcontent/XContentUtils.java index ca33b5ae422fc..ed92541f7c8ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/xcontent/XContentUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/xcontent/XContentUtils.java @@ -6,7 +6,9 @@ */ package org.elasticsearch.xpack.core.security.xcontent; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; @@ -110,4 +112,21 @@ public static void addAuthorizationInfo(final XContentBuilder builder, final Map } builder.endObject(); } + + public static void maybeAddErrorDetails(XContentBuilder builder, Map errors) throws IOException { + if (false == errors.isEmpty()) { + builder.startObject("errors"); + { + builder.field("count", errors.size()); + builder.startObject("details"); + for (Map.Entry idWithException : errors.entrySet()) { + builder.startObject(idWithException.getKey()); + ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, idWithException.getValue()); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponseTests.java new file mode 100644 index 0000000000000..20177be5cc477 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponseTests.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.profile; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.equalTo; + +public class GetProfilesResponseTests extends ESTestCase { + + public void testToXContent() throws IOException { + final boolean hasErrors = randomBoolean(); + + final Map errors; + if (hasErrors) { + // Force ordered key set for deterministic comparison with raw JSON string below + errors = new TreeMap<>(); + errors.put("u_user_foo_bar_1", new IllegalArgumentException("msg1")); + errors.put("u_user_foo_bar_2", new ResourceNotFoundException("not found")); + errors.put("u_user_foo_bar_3", new ElasticsearchException("error1", new IllegalArgumentException("msg2"))); + } else { + errors = Map.of(); + } + + final boolean hasProfiles = randomBoolean() || false == hasErrors; + final List profiles; + if (hasProfiles) { + profiles = List.of( + new Profile( + "u_profile_user_0", + true, + 0L, + new Profile.ProfileUser("profile_user", List.of("user"), "realm_1", null, null, null), + Map.of("label", "value"), + Map.of("data", "value2"), + new Profile.VersionControl(1, 1) + ), + new Profile( + "u_profile_admin_0", + false, + 1L, + new Profile.ProfileUser("profile_admin", List.of("admin"), "realm_2", "domain_2", "admin@example.org", "profile admin"), + Map.of(), + Map.of(), + new Profile.VersionControl(2, 2) + ) + ); + } else { + profiles = List.of(); + } + + final var response = new GetProfilesResponse(profiles, errors); + final XContentBuilder builder = XContentFactory.jsonBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + final StringBuilder sb = new StringBuilder("{ \"profiles\": "); + if (hasProfiles) { + sb.append(""" + [ + { + "uid": "u_profile_user_0", + "enabled": true, + "last_synchronized": 0, + "user": { + "username": "profile_user", + "roles": [ + "user" + ], + "realm_name": "realm_1" + }, + "labels": { + "label": "value" + }, + "data": { + "data": "value2" + }, + "_doc": { + "_primary_term": 1, + "_seq_no": 1 + } + }, + { + "uid": "u_profile_admin_0", + "enabled": false, + "last_synchronized": 1, + "user": { + "username": "profile_admin", + "roles": [ + "admin" + ], + "realm_name": "realm_2", + "realm_domain": "domain_2", + "email": "admin@example.org", + "full_name": "profile admin" + }, + "labels": {}, + "data": {}, + "_doc": { + "_primary_term": 2, + "_seq_no": 2 + } + } + ]"""); + } else { + sb.append("[]"); + } + + if (hasErrors) { + sb.append(", \"errors\": "); + sb.append(""" + { + "count": 3, + "details": { + "u_user_foo_bar_1": { + "type": "illegal_argument_exception", + "reason": "msg1" + }, + "u_user_foo_bar_2": { + "type": "resource_not_found_exception", + "reason": "not found" + }, + "u_user_foo_bar_3": { + "type": "exception", + "reason": "error1", + "caused_by": { + "type": "illegal_argument_exception", + "reason": "msg2" + } + 
} + } + } + """); + } + sb.append("}"); + + assertThat(Strings.toString(builder), equalTo(XContentHelper.stripWhitespace(sb.toString()))); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/WriteProfileDataPrivilegesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/WriteProfileDataPrivilegesTests.java index 88e06c98e75cd..057daf257fd85 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/WriteProfileDataPrivilegesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/WriteProfileDataPrivilegesTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; @@ -122,7 +122,7 @@ public void testActionAndRequestPredicate() { // different action name assertFalse( writeProfileDataPermission.check( - randomFrom(ActivateProfileAction.NAME, GetProfileAction.NAME, SuggestProfilesAction.NAME), + randomFrom(ActivateProfileAction.NAME, GetProfilesAction.NAME, SuggestProfilesAction.NAME), updateProfileDataRequest, authentication ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index a265cb48f2f46..a61aafeb89ccd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -163,7 +163,7 @@ import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileRequest; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; @@ -366,7 +366,7 @@ public void testIngestAdminRole() { assertThat(ingestAdminRole.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(ingestAdminRole.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(ingestAdminRole.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(ingestAdminRole.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(ingestAdminRole.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); 
assertThat(ingestAdminRole.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); assertThat(ingestAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction("foo")), is(false)); @@ -468,7 +468,7 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.cluster().check(GetBuiltinPrivilegesAction.NAME, request, authentication), is(true)); // User profile - assertThat(kibanaRole.cluster().check(GetProfileAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(GetProfilesAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.cluster().check(ActivateProfileAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(true)); @@ -1223,7 +1223,7 @@ public void testMonitoringUserRole() { assertThat(monitoringUserRole.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(monitoringUserRole.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(monitoringUserRole.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); @@ -1316,7 +1316,7 @@ public void testRemoteMonitoringAgentRole() { assertThat(remoteMonitoringAgentRole.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringAgentRole.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringAgentRole.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(remoteMonitoringAgentRole.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(remoteMonitoringAgentRole.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringAgentRole.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); // ILM assertThat(remoteMonitoringAgentRole.cluster().check(GetLifecycleAction.NAME, request, authentication), is(true)); @@ -1801,7 +1801,7 @@ public void testSuperuserRole() { superuserRole.cluster().check(UpdateProfileDataAction.NAME, mock(UpdateProfileDataRequest.class), authentication), is(true) ); - assertThat(superuserRole.cluster().check(GetProfileAction.NAME, mock(UpdateProfileDataRequest.class), authentication), is(true)); + assertThat(superuserRole.cluster().check(GetProfilesAction.NAME, mock(UpdateProfileDataRequest.class), authentication), is(true)); assertThat(superuserRole.cluster().check(SuggestProfilesAction.NAME, mock(SuggestProfilesRequest.class), authentication), is(true)); assertThat(superuserRole.cluster().check(ActivateProfileAction.NAME, mock(ActivateProfileRequest.class), authentication), is(true)); @@ -1962,7 +1962,7 @@ public void testBeatsAdminRole() { assertThat(beatsAdminRole.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); 
assertThat(beatsAdminRole.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(beatsAdminRole.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -2009,7 +2009,7 @@ public void testBeatsSystemRole() { assertThat(beatsSystemRole.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(beatsSystemRole.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(beatsSystemRole.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(beatsSystemRole.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(beatsSystemRole.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(beatsSystemRole.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); assertThat(beatsSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -2308,7 +2308,7 @@ public void testMachineLearningUserRole() { assertThat(role.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -2432,7 +2432,7 @@ public void testTransformUserRole() { assertThat(role.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -2558,7 +2558,7 @@ public void testPredefinedViewerRole() { assertThat(role.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); // 
Check index privileges assertOnlyReadAllowed(role, "observability-annotations"); @@ -2617,7 +2617,7 @@ public void testPredefinedEditorRole() { assertThat(role.cluster().check(ActivateProfileAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(SuggestProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(UpdateProfileDataAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(GetProfileAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetProfilesAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); // Check index privileges diff --git a/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java b/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java index 14f6ddaa33485..be60db248fede 100644 --- a/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java +++ b/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -22,13 +23,16 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.IntStream; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.nullValue; public class ProfileIT extends ESRestTestCase { @@ -38,7 +42,7 @@ public class ProfileIT extends ESRestTestCase { "uid": "%s", "enabled": true, "user": { - "username": "Foo", + "username": "%s", "roles": [ "role1", "role2" @@ -115,37 +119,81 @@ public void testProfileHasPrivileges() throws IOException { assertThat(((List) profileHasPrivilegesResponseMap.get("has_privilege_uids")), contains(profileUid)); } - public void testGetProfile() throws IOException { - final String uid = randomAlphaOfLength(20); - final String source = SAMPLE_PROFILE_DOCUMENT_TEMPLATE.formatted(uid, Instant.now().toEpochMilli()); - final Request indexRequest = new Request("PUT", ".security-profile/_doc/profile_" + uid); - indexRequest.setJsonEntity(source); - indexRequest.addParameter("refresh", "wait_for"); - indexRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-profile-8], but in a future major version, " - + "direct access to system indices will be prevented by default" - ) - ); - assertOK(adminClient().performRequest(indexRequest)); + public void testGetProfiles() throws IOException { + final List uids = randomList(1, 3, () -> randomAlphaOfLength(20)); - final Map profileMap1 = doGetProfile(uid); - assertThat(castToMap(profileMap1.get("user")).get("realm_name"), equalTo("realm_name_1")); - assertThat(castToMap(profileMap1.get("user")).get("realm_domain"), equalTo("domainA")); 
- assertThat(castToMap(profileMap1.get("data")), anEmptyMap()); + // Profile index does not exist yet + final Map responseMap0 = doGetProfiles(uids, null); + @SuppressWarnings("unchecked") + final List profiles0 = (List) responseMap0.get("profiles"); + assertThat(profiles0, empty()); + final Map errors0 = castToMap(responseMap0.get("errors")); + assertThat(errors0.get("count"), equalTo(uids.size())); + final Map errorDetails0 = castToMap(errors0.get("details")); + assertThat(errorDetails0.keySet(), equalTo(Set.copyOf(uids))); + errorDetails0.values().forEach(value -> assertThat(castToMap(value).get("reason"), equalTo("profile index does not exist"))); + + // Create the profile documents + for (String uid : uids) { + final String source = SAMPLE_PROFILE_DOCUMENT_TEMPLATE.formatted(uid, uid, Instant.now().toEpochMilli()); + final Request indexRequest = new Request("PUT", ".security-profile/_doc/profile_" + uid); + indexRequest.setJsonEntity(source); + indexRequest.addParameter("refresh", "wait_for"); + indexRequest.setOptions( + expectWarnings( + "this request accesses system indices: [.security-profile-8], but in a future major version, " + + "direct access to system indices will be prevented by default" + ) + ); + assertOK(adminClient().performRequest(indexRequest)); + } + + // Now retrieve profiles created above + final Map responseMap1 = doGetProfiles(uids, null); + @SuppressWarnings("unchecked") + final List> profiles1 = (List>) responseMap1.get("profiles"); + assertThat(profiles1.size(), equalTo(uids.size())); + IntStream.range(0, profiles1.size()).forEach(i -> { + final Map profileMap = profiles1.get(i); + final String uid = uids.get(i); + assertThat(profileMap.get("uid"), equalTo(uid)); + assertThat(castToMap(profileMap.get("user")).get("username"), equalTo(uid)); + assertThat(castToMap(profileMap.get("user")).get("realm_name"), equalTo("realm_name_1")); + assertThat(castToMap(profileMap.get("user")).get("realm_domain"), equalTo("domainA")); + assertThat(castToMap(profileMap.get("data")), anEmptyMap()); + }); // Retrieve application data along the profile - final Map profileMap2 = doGetProfile(uid, "app1"); - assertThat(castToMap(profileMap2.get("data")), equalTo(Map.of("app1", Map.of("name", "app1")))); + final Map responseMap2 = doGetProfiles(uids, "app1"); + @SuppressWarnings("unchecked") + final List> profiles2 = (List>) responseMap2.get("profiles"); + assertThat(profiles2.size(), equalTo(uids.size())); + IntStream.range(0, profiles2.size()).forEach(i -> { + final Map profileMap = profiles2.get(i); + assertThat(castToMap(profileMap.get("data")), equalTo(Map.of("app1", Map.of("name", "app1")))); + }); // Retrieve multiple application data - final Map profileMap3 = doGetProfile(uid, randomFrom("app1,app2", "*", "app*")); - assertThat(castToMap(profileMap3.get("data")), equalTo(Map.of("app1", Map.of("name", "app1"), "app2", Map.of("name", "app2")))); - - // Non-existing profile - final Request getProfileRequest4 = new Request("GET", "_security/profile/not_" + uid); - final ResponseException e4 = expectThrows(ResponseException.class, () -> adminClient().performRequest(getProfileRequest4)); - assertThat(e4.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + final Map responseMap3 = doGetProfiles(uids, randomFrom("app1,app2", "*", "app*")); + @SuppressWarnings("unchecked") + final List> profiles3 = (List>) responseMap3.get("profiles"); + assertThat(profiles3.size(), equalTo(uids.size())); + IntStream.range(0, profiles3.size()).forEach(i -> { + final Map profileMap = 
profiles3.get(i); + assertThat(castToMap(profileMap.get("data")), equalTo(Map.of("app1", Map.of("name", "app1"), "app2", Map.of("name", "app2")))); + }); + + // Non-existing profiles + final List notUids = uids.stream().map(uid -> "not_" + uid).toList(); + final Map responseMap4 = doGetProfiles(notUids, null); + @SuppressWarnings("unchecked") + final List profiles4 = (List) responseMap4.get("profiles"); + assertThat(profiles4, empty()); + final Map errors4 = castToMap(responseMap4.get("errors")); + assertThat(errors4.get("count"), equalTo(notUids.size())); + final Map errorDetails4 = castToMap(errors4.get("details")); + assertThat(errorDetails4.keySet(), equalTo(Set.copyOf(notUids))); + errorDetails4.values().forEach(value -> assertThat(castToMap(value).get("type"), equalTo("resource_not_found_exception"))); } public void testUpdateProfileData() throws IOException { @@ -311,15 +359,25 @@ private Map doGetProfile(String uid) throws IOException { } private Map doGetProfile(String uid, @Nullable String dataKey) throws IOException { - final Request getProfileRequest1 = new Request("GET", "_security/profile/" + uid); + final Map responseMap = doGetProfiles(List.of(uid), dataKey); + assertThat(responseMap.get("errors"), nullValue()); + + @SuppressWarnings("unchecked") + final List> profiles = (List>) responseMap.get("profiles"); + assertThat(profiles.size(), equalTo(1)); + final Map profileMap = profiles.get(0); + assertThat(profileMap.get("uid"), equalTo(uid)); + return profileMap; + } + + private Map doGetProfiles(List uids, @Nullable String dataKey) throws IOException { + final Request getProfilesRequest = new Request("GET", "_security/profile/" + Strings.collectionToCommaDelimitedString(uids)); if (dataKey != null) { - getProfileRequest1.addParameter("data", dataKey); + getProfilesRequest.addParameter("data", dataKey); } - final Response getProfileResponse1 = adminClient().performRequest(getProfileRequest1); - assertOK(getProfileResponse1); - final Map getProfileMap1 = responseAsMap(getProfileResponse1); - assertThat(getProfileMap1.keySet(), contains(uid)); - return castToMap(getProfileMap1.get(uid)); + final Response getProfilesResponse = adminClient().performRequest(getProfilesRequest); + assertOK(getProfilesResponse); + return responseAsMap(getProfilesResponse); } private void doSetEnabled(String uid, boolean enabled) throws IOException { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/AbstractProfileIntegTestCase.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/AbstractProfileIntegTestCase.java index 173ef90714ab7..f05580a1f43b3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/AbstractProfileIntegTestCase.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/AbstractProfileIntegTestCase.java @@ -13,8 +13,8 @@ import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileRequest; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileResponse; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileRequest; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesRequest; 
import org.elasticsearch.xpack.core.security.action.profile.GetProfilesResponse; import org.elasticsearch.xpack.core.security.action.profile.Profile; import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; @@ -28,8 +28,8 @@ import static org.elasticsearch.test.SecuritySettingsSource.TEST_PASSWORD_HASHED; import static org.hamcrest.Matchers.anEmptyMap; -import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -123,10 +123,10 @@ protected Profile doActivateProfile(String username, SecureString password, bool } protected Profile getProfile(String uid, Set dataKeys) { - final GetProfilesResponse getProfilesResponse = client().execute(GetProfileAction.INSTANCE, new GetProfileRequest(uid, dataKeys)) + final GetProfilesResponse getProfilesResponse = client().execute(GetProfilesAction.INSTANCE, new GetProfilesRequest(uid, dataKeys)) .actionGet(); - assertThat(getProfilesResponse.getProfiles(), arrayWithSize(1)); - return getProfilesResponse.getProfiles()[0]; + assertThat(getProfilesResponse.getProfiles(), hasSize(1)); + return getProfilesResponse.getProfiles().get(0); } protected T getInstanceFromRandomNode(Class clazz) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index b085aef7e450d..b7ccd0b1698fe 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.security.profile; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; @@ -18,10 +19,11 @@ import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.common.ResultsAndErrors; import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesRequest; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileRequest; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesRequest; import org.elasticsearch.xpack.core.security.action.profile.GetProfilesResponse; import org.elasticsearch.xpack.core.security.action.profile.Profile; import org.elasticsearch.xpack.core.security.action.profile.SetProfileEnabledAction; @@ -51,12 +53,14 @@ import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static 
org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -74,6 +78,7 @@ import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -202,6 +207,42 @@ public void testActivateProfile() { assertThat(getProfile(profile5.uid(), Set.of("my_app")).applicationData(), equalTo(Map.of("my_app", Map.of("theme", "default")))); } + public void testGetProfiles() { + final ProfileService profileService = getInstanceFromRandomNode(ProfileService.class); + final List allUids = new ArrayList<>(); + // Activate a few profiles + IntStream.range(0, 5).forEach(i -> { + final Authentication authentication = AuthenticationTestHelper.builder() + .user(new User(randomAlphaOfLengthBetween(3, 8) + i)) + .realm(false) + .build(); + final PlainActionFuture future = new PlainActionFuture<>(); + profileService.activateProfile(authentication, future); + allUids.add(future.actionGet().uid()); + }); + + final List requestedUids = new ArrayList<>(randomNonEmptySubsetOf(allUids)); + String nonExistingUid = null; + if (randomBoolean()) { + // request a non-existing uid + nonExistingUid = randomValueOtherThanMany(allUids::contains, () -> randomAlphaOfLength(20)); + requestedUids.add(nonExistingUid); + } + final PlainActionFuture future = new PlainActionFuture<>(); + client().execute(GetProfilesAction.INSTANCE, new GetProfilesRequest(requestedUids, Set.of()), future); + final GetProfilesResponse getProfilesResponse = future.actionGet(); + final List profiles = getProfilesResponse.getProfiles(); + if (nonExistingUid == null) { + assertThat(getProfilesResponse.getErrors(), anEmptyMap()); + assertThat(profiles.stream().map(Profile::uid).toList(), equalTo(requestedUids)); + } else { + assertThat(getProfilesResponse.getErrors().keySet(), equalTo(Set.of(nonExistingUid))); + final Exception e = getProfilesResponse.getErrors().get(nonExistingUid); + assertThat(e, instanceOf(ResourceNotFoundException.class)); + assertThat(profiles.stream().map(Profile::uid).toList(), equalTo(requestedUids.subList(0, requestedUids.size() - 1))); + } + } + public void testUpdateProfileData() { final Profile profile1 = doActivateProfile(RAC_USER_NAME, TEST_PASSWORD_SECURE_STRING); @@ -427,9 +468,9 @@ public void testSuggestProfilesWithHint() throws IOException { future2 ); assertThat(future2.actionGet().isAcknowledged(), is(true)); - final PlainActionFuture future3 = new PlainActionFuture<>(); - profileService.getProfile(profile.uid(), Set.of(), future3); - return future3.actionGet(); + final PlainActionFuture> future3 = new PlainActionFuture<>(); + profileService.getProfiles(List.of(profile.uid()), Set.of(), future3); + return future3.actionGet().results().iterator().next(); }).toList(); // Default order of last synchronized timestamp @@ -514,10 +555,10 @@ public void testProfileAPIsWhenIndexNotCreated() { // Get Profile by ID returns empty result final GetProfilesResponse getProfilesResponse = client().execute( - GetProfileAction.INSTANCE, - new GetProfileRequest(randomAlphaOfLength(20), Set.of()) + GetProfilesAction.INSTANCE, + new GetProfilesRequest(randomAlphaOfLength(20), Set.of()) ).actionGet(); - assertThat(getProfilesResponse.getProfiles(), arrayWithSize(0)); + 
assertThat(getProfilesResponse.getProfiles(), empty()); // Ensure index does not exist assertThat(getProfileIndexResponse().getIndices(), not(hasItemInArray(INTERNAL_SECURITY_PROFILE_INDEX_8))); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 1b829098a0188..a977b7576ac40 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -112,7 +112,7 @@ import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesAction; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.SetProfileEnabledAction; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; @@ -196,7 +196,7 @@ import org.elasticsearch.xpack.security.action.privilege.TransportGetPrivilegesAction; import org.elasticsearch.xpack.security.action.privilege.TransportPutPrivilegesAction; import org.elasticsearch.xpack.security.action.profile.TransportActivateProfileAction; -import org.elasticsearch.xpack.security.action.profile.TransportGetProfileAction; +import org.elasticsearch.xpack.security.action.profile.TransportGetProfilesAction; import org.elasticsearch.xpack.security.action.profile.TransportProfileHasPrivilegesAction; import org.elasticsearch.xpack.security.action.profile.TransportSetProfileEnabledAction; import org.elasticsearch.xpack.security.action.profile.TransportSuggestProfilesAction; @@ -298,7 +298,7 @@ import org.elasticsearch.xpack.security.rest.action.profile.RestActivateProfileAction; import org.elasticsearch.xpack.security.rest.action.profile.RestDisableProfileAction; import org.elasticsearch.xpack.security.rest.action.profile.RestEnableProfileAction; -import org.elasticsearch.xpack.security.rest.action.profile.RestGetProfileAction; +import org.elasticsearch.xpack.security.rest.action.profile.RestGetProfilesAction; import org.elasticsearch.xpack.security.rest.action.profile.RestSuggestProfilesAction; import org.elasticsearch.xpack.security.rest.action.profile.RestUpdateProfileDataAction; import org.elasticsearch.xpack.security.rest.action.realm.RestClearRealmCacheAction; @@ -1240,7 +1240,7 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(KibanaEnrollmentAction.INSTANCE, TransportKibanaEnrollmentAction.class), new ActionHandler<>(NodeEnrollmentAction.INSTANCE, TransportNodeEnrollmentAction.class), new ActionHandler<>(ProfileHasPrivilegesAction.INSTANCE, TransportProfileHasPrivilegesAction.class), - new ActionHandler<>(GetProfileAction.INSTANCE, TransportGetProfileAction.class), + new ActionHandler<>(GetProfilesAction.INSTANCE, TransportGetProfilesAction.class), new ActionHandler<>(ActivateProfileAction.INSTANCE, TransportActivateProfileAction.class), new ActionHandler<>(UpdateProfileDataAction.INSTANCE, TransportUpdateProfileDataAction.class), new ActionHandler<>(SuggestProfilesAction.INSTANCE, TransportSuggestProfilesAction.class), @@ -1322,7 +1322,7 @@ public List getRestHandlers( 
new RestKibanaEnrollAction(settings, getLicenseState()), new RestNodeEnrollmentAction(settings, getLicenseState()), new RestProfileHasPrivilegesAction(settings, securityContext.get(), getLicenseState()), - new RestGetProfileAction(settings, getLicenseState()), + new RestGetProfilesAction(settings, getLicenseState()), new RestActivateProfileAction(settings, getLicenseState()), new RestUpdateProfileDataAction(settings, getLicenseState()), new RestSuggestProfilesAction(settings, getLicenseState()), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportGetProfileAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportGetProfilesAction.java similarity index 57% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportGetProfileAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportGetProfilesAction.java index 2dd7de7d9aba2..3e28ab01810ed 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportGetProfileAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportGetProfilesAction.java @@ -13,23 +13,29 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileRequest; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesRequest; import org.elasticsearch.xpack.core.security.action.profile.GetProfilesResponse; import org.elasticsearch.xpack.security.profile.ProfileService; -public class TransportGetProfileAction extends HandledTransportAction { +import java.util.List; + +public class TransportGetProfilesAction extends HandledTransportAction { private final ProfileService profileService; @Inject - public TransportGetProfileAction(TransportService transportService, ActionFilters actionFilters, ProfileService profileService) { - super(GetProfileAction.NAME, transportService, actionFilters, GetProfileRequest::new); + public TransportGetProfilesAction(TransportService transportService, ActionFilters actionFilters, ProfileService profileService) { + super(GetProfilesAction.NAME, transportService, actionFilters, GetProfilesRequest::new); this.profileService = profileService; } @Override - protected void doExecute(Task task, GetProfileRequest request, ActionListener listener) { - profileService.getProfile(request.getUid(), request.getDataKeys(), listener.map(GetProfilesResponse::new)); + protected void doExecute(Task task, GetProfilesRequest request, ActionListener listener) { + profileService.getProfiles( + request.getUids(), + request.getDataKeys(), + listener.map(resultsAndError -> new GetProfilesResponse(List.copyOf(resultsAndError.results()), resultsAndError.errors())) + ); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java index a7baea722202e..6c13806241081 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -56,6 +57,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.common.ResultsAndErrors; import org.elasticsearch.xpack.core.security.action.profile.Profile; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesRequest; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesResponse; @@ -79,7 +81,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.TreeSet; +import java.util.TreeMap; import java.util.function.Function; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -126,28 +128,43 @@ public ProfileService( this.threadPool = threadPool; } - public void getProfile(String uid, Set dataKeys, ActionListener listener) { - getVersionedDocument( - uid, - listener.map(versionedDocument -> versionedDocument != null ? versionedDocument.toProfile(dataKeys) : null) - ); + public void getProfiles(List uids, Set dataKeys, ActionListener> listener) { + getVersionedDocuments(uids, listener.map(resultsAndErrors -> { + if (resultsAndErrors != null) { + return new ResultsAndErrors<>( + resultsAndErrors.results().stream().map(versionedDocument -> versionedDocument.toProfile(dataKeys)).toList(), + resultsAndErrors.errors() + ); + } else { + return new ResultsAndErrors<>( + List.of(), + uids.stream() + .collect( + Collectors.toUnmodifiableMap( + Function.identity(), + uid -> new ElasticsearchException("profile index does not exist") + ) + ) + ); + } + })); } public void getProfileSubjects(Collection uids, ActionListener listener) { - getVersionedDocuments( - uids, - listener.map( - docsAndException -> docsAndException != null - ? 
new MultiProfileSubjectResponse( - docsAndException.v1() - .stream() - .filter(doc -> doc.enabled()) - .collect(Collectors.toMap(profileDoc -> profileDoc.uid(), profileDoc -> profileDoc.user().toSubject())), - docsAndException.v2() - ) - : new MultiProfileSubjectResponse(Map.of(), Set.of()) - ) - ); + getVersionedDocuments(uids, listener.map(resultsAndErrors -> { + if (resultsAndErrors != null) { + return new MultiProfileSubjectResponse( + resultsAndErrors.results() + .stream() + .map(VersionedDocument::doc) + .filter(ProfileDocument::enabled) + .collect(Collectors.toMap(ProfileDocument::uid, profileDoc -> profileDoc.user().toSubject())), + Set.copyOf(errorUidsExcludingNotFound(resultsAndErrors.errors())) + ); + } else { + return new MultiProfileSubjectResponse(Map.of(), Set.of()); + } + })); } // TODO: with request when we take request body for profile activation @@ -352,9 +369,9 @@ private void getVersionedDocument(String uid, ActionListener }); } - private void getVersionedDocuments(Collection uids, ActionListener, Set>> listener) { + private void getVersionedDocuments(Collection uids, ActionListener> listener) { if (uids.isEmpty()) { - listener.onResponse(new Tuple<>(List.of(), Set.of())); + listener.onResponse(ResultsAndErrors.empty()); return; } tryFreezeAndCheckIndex(listener).ifPresent(frozenProfileIndex -> { @@ -363,24 +380,25 @@ private void getVersionedDocuments(Collection uids, ActionListener new OriginSettingClient(client, getActionOrigin()).prepareMultiGet() .addIds(frozenProfileIndex.aliasName(), uids.stream().map(ProfileService::uidToDocId).toArray(String[]::new)) .execute(ActionListener.wrap(multiGetResponse -> { - List retrievedDocs = new ArrayList<>(multiGetResponse.getResponses().length); + List retrievedDocs = new ArrayList<>(multiGetResponse.getResponses().length); // ordered for tests - Set failures = new TreeSet<>(); - Exception loggedException = null; + final Map errors = new TreeMap<>(); for (MultiGetItemResponse itemResponse : multiGetResponse.getResponses()) { + final String profileUid = docIdToUid(itemResponse.getId()); if (itemResponse.isFailed()) { - failures.add(docIdToUid(itemResponse.getId())); - if (logger.isDebugEnabled() && itemResponse.getFailure().getFailure() != null) { - loggedException = ExceptionsHelper.useOrSuppress( - loggedException, - itemResponse.getFailure().getFailure() - ); - } + errors.put(profileUid, itemResponse.getFailure().getFailure()); } else if (itemResponse.getResponse() != null) { if (itemResponse.getResponse().isExists()) { - retrievedDocs.add(buildProfileDocument(itemResponse.getResponse().getSourceAsBytesRef())); - } else if (logger.isDebugEnabled()) { - logger.debug("Profile [{}] not found", docIdToUid(itemResponse.getId())); + retrievedDocs.add( + new VersionedDocument( + buildProfileDocument(itemResponse.getResponse().getSourceAsBytesRef()), + itemResponse.getResponse().getPrimaryTerm(), + itemResponse.getResponse().getSeqNo() + ) + ); + } else { + logger.debug("Profile [{}] not found", profileUid); + errors.put(profileUid, new ResourceNotFoundException("profile document not found")); } } else { assert false @@ -388,10 +406,18 @@ private void getVersionedDocuments(Collection uids, ActionListener format("Failed to retrieve profiles %s", failures), loggedException); + final ResultsAndErrors resultsAndErrors = new ResultsAndErrors<>(retrievedDocs, errors); + if (logger.isDebugEnabled() && false == resultsAndErrors.errors().isEmpty()) { + Exception loggedException = null; + final List errorUids = 
errorUidsExcludingNotFound(resultsAndErrors.errors()); + for (String uid : errorUids) { + loggedException = ExceptionsHelper.useOrSuppress(loggedException, resultsAndErrors.errors().get(uid)); + } + if (loggedException != null) { + logger.debug(() -> format("Failed to retrieve profiles %s", errorUids), loggedException); + } } - listener.onResponse(new Tuple<>(retrievedDocs, failures)); + listener.onResponse(resultsAndErrors); }, listener::onFailure)) ); }); @@ -813,6 +839,14 @@ private static ProfileDocument updateWithSubject(ProfileDocument doc, Subject su ); } + private static List errorUidsExcludingNotFound(Map errors) { + return errors.entrySet() + .stream() + .filter(entry -> entry.getValue() != null && false == entry.getValue() instanceof ResourceNotFoundException) + .map(Map.Entry::getKey) + .toList(); + } + // Package private for testing record VersionedDocument(ProfileDocument doc, long primaryTerm, long seqNo) { @@ -838,6 +872,7 @@ Profile toProfile(Set dataKeys) { new Profile.VersionControl(primaryTerm, seqNo) ); } + } public record MultiProfileSubjectResponse(Map profileUidToSubject, Set failureProfileUids) {} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestGetProfileAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestGetProfilesAction.java similarity index 70% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestGetProfileAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestGetProfilesAction.java index 8074fe839aa80..e4cf29eb8deb4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestGetProfileAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestGetProfilesAction.java @@ -12,20 +12,21 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestStatusToXContentListener; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileAction; -import org.elasticsearch.xpack.core.security.action.profile.GetProfileRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesAction; +import org.elasticsearch.xpack.core.security.action.profile.GetProfilesRequest; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class RestGetProfileAction extends SecurityBaseRestHandler { +public class RestGetProfilesAction extends SecurityBaseRestHandler { - public RestGetProfileAction(Settings settings, XPackLicenseState licenseState) { + public RestGetProfilesAction(Settings settings, XPackLicenseState licenseState) { super(settings, licenseState); } @@ -41,9 +42,9 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - final String uid = request.param("uid"); + final String[] uids = request.paramAsStringArray("uid", Strings.EMPTY_ARRAY); final Set dataKeys = Strings.tokenizeByCommaToSet(request.param("data", null)); - final GetProfileRequest getProfileRequest 
= new GetProfileRequest(uid, dataKeys); - return channel -> client.execute(GetProfileAction.INSTANCE, getProfileRequest, new RestStatusToXContentListener<>(channel)); + final GetProfilesRequest getProfilesRequest = new GetProfilesRequest(Arrays.asList(uids), dataKeys); + return channel -> client.execute(GetProfilesAction.INSTANCE, getProfilesRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java index 039a80fff270f..5595f2550c1a6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java @@ -15,8 +15,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.get.GetAction; -import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.get.MultiGetItemResponse; @@ -59,6 +57,7 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.common.ResultsAndErrors; import org.elasticsearch.xpack.core.security.action.profile.Profile; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesRequest; import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesRequestTests; @@ -101,6 +100,7 @@ import static org.elasticsearch.xpack.core.security.support.Validation.VALID_NAME_CHARS; import static org.elasticsearch.xpack.security.Security.SECURITY_CRYPTO_THREAD_POOL_NAME; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ALIAS; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -204,54 +204,68 @@ public void stopThreadPool() { terminate(threadPool); } - public void testGetProfileByUid() { - final String uid = randomAlphaOfLength(20); - doAnswer(invocation -> { - assertThat( - threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? 
SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) - ); - final GetRequest getRequest = (GetRequest) invocation.getArguments()[1]; - @SuppressWarnings("unchecked") - final ActionListener listener = (ActionListener) invocation.getArguments()[2]; - client.get(getRequest, listener); - return null; - }).when(client).execute(eq(GetAction.INSTANCE), any(GetRequest.class), anyActionListener()); - - final long lastSynchronized = Instant.now().toEpochMilli(); - mockGetRequest(uid, lastSynchronized); - - final PlainActionFuture future = new PlainActionFuture<>(); + public void testGetProfilesByUids() { + final List sampleDocumentParameters = randomList( + 1, + 5, + () -> new SampleDocumentParameter( + randomAlphaOfLength(20), + randomAlphaOfLengthBetween(3, 18), + randomList(0, 3, () -> randomAlphaOfLengthBetween(3, 8)), + Instant.now().toEpochMilli() + randomLongBetween(-100_000, 100_000) + ) + ); + mockMultiGetRequest(sampleDocumentParameters); + final PlainActionFuture> future = new PlainActionFuture<>(); final Set dataKeys = randomFrom(Set.of("app1"), Set.of("app2"), Set.of("app1", "app2"), Set.of()); + profileService.getProfiles(sampleDocumentParameters.stream().map(SampleDocumentParameter::uid).toList(), dataKeys, future); + final ResultsAndErrors resultsAndErrors = future.actionGet(); + assertThat(resultsAndErrors.results().size(), equalTo(sampleDocumentParameters.size())); + assertThat(resultsAndErrors.errors(), anEmptyMap()); - profileService.getProfile(uid, dataKeys, future); - final Profile profile = future.actionGet(); + int i = 0; + for (Profile profile : resultsAndErrors.results()) { + final Map applicationData = new HashMap<>(); - final Map applicationData = new HashMap<>(); + if (dataKeys != null && dataKeys.contains("app1")) { + applicationData.put("app1", Map.of("name", "app1")); + } - if (dataKeys != null && dataKeys.contains("app1")) { - applicationData.put("app1", Map.of("name", "app1")); - } + if (dataKeys != null && dataKeys.contains("app2")) { + applicationData.put("app2", Map.of("name", "app2")); + } - if (dataKeys != null && dataKeys.contains("app2")) { - applicationData.put("app2", Map.of("name", "app2")); + final SampleDocumentParameter sampleDocumentParameter = sampleDocumentParameters.get(i); + assertThat( + profile, + equalTo( + new Profile( + sampleDocumentParameter.uid, + true, + sampleDocumentParameter.lastSynchronized, + new Profile.ProfileUser( + sampleDocumentParameter.username, + sampleDocumentParameter.roles, + "realm_name_1", + "domainA", + "foo@example.com", + "User Foo" + ), + Map.of(), + applicationData, + new Profile.VersionControl(1, 0) + ) + ) + ); + i++; } + } - assertThat( - profile, - equalTo( - new Profile( - uid, - true, - lastSynchronized, - new Profile.ProfileUser("Foo", List.of("role1", "role2"), "realm_name_1", "domainA", "foo@example.com", "User Foo"), - Map.of(), - applicationData, - new Profile.VersionControl(1, 0) - ) - ) - ); + public void testGetProfilesEmptyUids() { + final PlainActionFuture> future = new PlainActionFuture<>(); + profileService.getProfiles(List.of(), Set.of(), future); + assertThat(future.actionGet().isEmpty(), is(true)); } @SuppressWarnings("unchecked") @@ -748,8 +762,39 @@ public void testActivateProfileWithDifferentUidFormats() throws IOException { assertThat(e3.getMessage(), containsString("The username must begin with an alphanumeric character")); } - private void mockGetRequest(String uid, long lastSynchronized) { - mockGetRequest(uid, "Foo", List.of("role1", "role2"), lastSynchronized); + record 
SampleDocumentParameter(String uid, String username, List roles, long lastSynchronized) {} + + private void mockMultiGetRequest(List sampleDocumentParameters) { + mockMultiGetRequest(sampleDocumentParameters, Map.of()); + } + + private void mockMultiGetRequest(List sampleDocumentParameters, Map errors) { + doAnswer(invocation -> { + assertThat( + threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), + equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + ); + final MultiGetRequest multiGetRequest = (MultiGetRequest) invocation.getArguments()[1]; + @SuppressWarnings("unchecked") + final ActionListener listener = (ActionListener) invocation.getArguments()[2]; + client.multiGet(multiGetRequest, listener); + return null; + }).when(client).execute(eq(MultiGetAction.INSTANCE), any(MultiGetRequest.class), anyActionListener()); + + final Map results = sampleDocumentParameters.stream() + .collect( + Collectors.toUnmodifiableMap( + param -> "profile_" + param.uid, + param -> getSampleProfileDocumentSource(param.uid, param.username, param.roles, param.lastSynchronized) + ) + ); + + SecurityMocks.mockMultiGetRequest( + client, + SECURITY_PROFILE_ALIAS, + results, + errors.entrySet().stream().collect(Collectors.toUnmodifiableMap(entry -> "profile_" + entry.getKey(), Map.Entry::getValue)) + ); } public static String getSampleProfileDocumentSource(String uid, String username, List roles, long lastSynchronized) { @@ -761,11 +806,6 @@ public static String getSampleProfileDocumentSource(String uid, String username, ); } - private void mockGetRequest(String uid, String username, List roles, long lastSynchronized) { - final String source = getSampleProfileDocumentSource(uid, username, roles, lastSynchronized); - SecurityMocks.mockGetRequest(client, SECURITY_PROFILE_ALIAS, "profile_" + uid, new BytesArray(source)); - } - private ProfileDocument randomProfileDocument(String uid) { return new ProfileDocument( uid, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java index f00f1a3bfd371..d45f087746762 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java @@ -7,18 +7,23 @@ package org.elasticsearch.xpack.security.test; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetRequestBuilder; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.get.GetResult; @@ -37,11 +42,18 @@ 
import java.security.GeneralSecurityException; import java.time.Clock; import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ALIAS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_TOKENS_ALIAS; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -134,6 +146,74 @@ public static void mockGetRequestException(Client client, Exception e) { }).when(client).get(any(GetRequest.class), anyActionListener()); } + public static void mockMultiGetRequest(Client client, String indexAliasName, Map results) { + mockMultiGetRequest(client, indexAliasName, results, Map.of()); + } + + public static void mockMultiGetRequest( + Client client, + String indexAliasName, + Map results, + Map errors + ) { + final Set allDocumentIds = Stream.concat(results.keySet().stream(), errors.keySet().stream()) + .collect(Collectors.toUnmodifiableSet()); + Assert.assertThat("duplicate entries found in results and errors", allDocumentIds.size(), equalTo(results.size() + errors.size())); + doAnswer(inv -> { + Assert.assertThat(inv.getArguments(), arrayWithSize(2)); + Assert.assertThat(inv.getArguments()[0], instanceOf(MultiGetRequest.class)); + final MultiGetRequest request = (MultiGetRequest) inv.getArguments()[0]; + Assert.assertThat( + request.getItems().stream().map(MultiGetRequest.Item::id).collect(Collectors.toUnmodifiableSet()), + equalTo(allDocumentIds) + ); + + final List responses = new ArrayList<>(); + for (MultiGetRequest.Item item : request.getItems()) { + Assert.assertThat(item.index(), equalTo(indexAliasName)); + final String documentId = item.id(); + if (results.containsKey(documentId)) { + responses.add( + new MultiGetItemResponse( + new GetResponse( + new GetResult( + SECURITY_PROFILE_ALIAS, + documentId, + 0, + 1, + 1, + true, + new BytesArray(results.get(documentId)), + emptyMap(), + emptyMap() + ) + ), + null + ) + ); + } else { + final Exception exception = errors.get(documentId); + if (exception instanceof ResourceNotFoundException) { + final GetResponse missingResponse = mock(GetResponse.class); + when(missingResponse.isExists()).thenReturn(false); + when(missingResponse.getId()).thenReturn(documentId); + responses.add(new MultiGetItemResponse(missingResponse, null)); + } else { + final MultiGetResponse.Failure failure = mock(MultiGetResponse.Failure.class); + when(failure.getId()).thenReturn(documentId); + when(failure.getFailure()).thenReturn(exception); + responses.add(new MultiGetItemResponse(null, failure)); + } + } + } + Assert.assertThat(inv.getArguments()[1], instanceOf(ActionListener.class)); + @SuppressWarnings("unchecked") + final ActionListener listener = (ActionListener) inv.getArguments()[1]; + listener.onResponse(new MultiGetResponse(responses.toArray(MultiGetItemResponse[]::new))); + return null; + }).when(client).multiGet(any(MultiGetRequest.class), anyActionListener()); + } + public static void mockIndexRequest(Client client, String indexAliasName, Consumer consumer) { doAnswer(inv -> { 
Assert.assertThat(inv.getArguments(), arrayWithSize(1)); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/10_basic.yml index cb79405f2829d..3bd574d91d3b0 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/10_basic.yml @@ -18,12 +18,83 @@ setup: "email" : "joe@bazooka.gum" } + - do: + security.put_user: + username: "doe" + body: > + { + "password" : "s3krit-password", + "roles" : [ "superuser" ], + "full_name" : "Bazooka Doe", + "email" : "doe@bazooka.gum" + } + --- teardown: - do: security.delete_user: username: "joe" ignore: 404 + - do: + security.delete_user: + username: "doe" + ignore: 404 + +--- +"Test get user profiles": + - do: + security.activate_user_profile: + body: > + { + "grant_type": "password", + "username": "joe", + "password" : "s3krit-password" + } + - is_true: uid + - match: { "user.username" : "joe" } + - match: { "user.roles" : [ "superuser" ] } + - match: { "user.full_name" : "Bazooka Joe" } + - match: { "user.realm_name" : "default_native" } + - is_false: "user.realm_domain" + - is_true: _doc + - set: { uid: profile_uid_1 } + + - do: + security.activate_user_profile: + body: > + { + "grant_type": "password", + "username": "doe", + "password" : "s3krit-password" + } + - is_true: uid + - match: { "user.username" : "doe" } + - match: { "user.roles" : [ "superuser" ] } + - match: { "user.full_name" : "Bazooka Doe" } + - match: { "user.realm_name" : "default_native" } + - is_false: "user.realm_domain" + - is_true: _doc + - set: { uid: profile_uid_2 } + + - do: + security.get_user_profile: + uid: "u_eGdcwXYIE3LEOrqz6p-3DHQ4HrAtxuk_ttRNFh2m7rM_0,u_eZ75KhGvkY4_t0HfQpNPO1aO0tk6wd908bjUGieTKm8_0,does_not_exist" + + - length: { profiles : 2 } + - set: { profiles.0 : profile } + - match: { $profile.uid : "$profile_uid_1" } + - match: { $profile.user.username : "joe" } + - match: { $profile.data : {} } + - set: { profiles.1 : profile } + - match: { $profile.uid : "$profile_uid_2" } + - match: { $profile.user.username : "doe" } + - match: { $profile.data : {} } + + - is_true: errors + - set: { errors: errors } + - match: { $errors.count : 1 } + - match: { $errors.details.does_not_exist.type: "resource_not_found_exception" } + --- "Test user profile apis": @@ -48,9 +119,9 @@ teardown: security.get_user_profile: uid: "$profile_uid" - - length: { $body: 1 } - - is_true: "$profile_uid" - - set: { $profile_uid: profile } + - length: { profiles: 1 } + - is_false: errors + - set: { profiles.0: profile } - match: { $profile.uid : "$profile_uid" } - match: { $profile.user.username : "joe" } - match: { $profile.data : {} } @@ -83,9 +154,10 @@ teardown: uid: "$profile_uid" data: "app1" - - length: { $body: 1 } - - is_true: "$profile_uid" - - set: { $profile_uid: profile } + - length: { profiles: 1 } + - is_false: errors + - set: { profiles.0: profile } + - match: { $profile.uid: "$profile_uid" } - match: { $profile.data: { "app1": { "theme": "default" } } } # Activate again should get the same profile @@ -105,19 +177,23 @@ teardown: uid: "$profile_uid" data: "*" - - length: { $body: 1 } - - is_true: "$profile_uid" - - set: { $profile_uid: profile } + - length: { profiles: 1 } + - is_false: errors + - set: { profiles.0: profile } + - match: { $profile.uid: "$profile_uid" } - match: { $profile.labels: { "kibana": { "spaces": "demo" } } } - match: 
{ $profile.data: { "app1": { "theme": "default" }, "app2": { "theme": "dark"} } } - # Attempting to get a non-existing profile leads to 404 + # Attempting to get a non-existing profile - do: - catch: missing security.get_user_profile: uid: no_such_profile_uid - - length: { $body: 0 } + - length: { profiles: 0 } + - is_true: errors + - set: { errors: errors } + - match: { $errors.count : 1 } + - match: { $errors.details.no_such_profile_uid.type: "resource_not_found_exception" } --- @@ -136,7 +212,7 @@ teardown: - do: security.get_user_profile: uid: "$profile_uid" - - set: { $profile_uid: profile } + - set: { profiles.0: profile } - is_true: "$profile.enabled" # disable the profile @@ -147,7 +223,7 @@ teardown: - do: security.get_user_profile: uid: "$profile_uid" - - set: { $profile_uid: profile } + - set: { profiles.0: profile } - is_false: "$profile.enabled" # enable again @@ -158,7 +234,7 @@ teardown: - do: security.get_user_profile: uid: "$profile_uid" - - set: { $profile_uid: profile } + - set: { profiles.0: profile } - is_true: "$profile.enabled" --- diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml index a8c3015ecbc8a..c667c9266b8d5 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/140_user_profile.yml @@ -8,7 +8,7 @@ - do: node_selector: - version: " 8.4.0 - " + version: " 8.5.0 - " security.activate_user_profile: body: > { @@ -22,12 +22,11 @@ - do: node_selector: - version: " 8.4.0 - " + version: " 8.5.0 - " security.get_user_profile: uid: "$profile_uid" - - length: { $body: 1 } - - is_true: "$profile_uid" - - set: { $profile_uid: profile } + - length: { profiles : 1 } + - set: { profiles.0 : profile } - match: { $profile.uid : "$profile_uid" } - match: { $profile.user.username : "test_user" } From 72e24d38bbc059f7fe1c3bc2860d4e3c4b760a70 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 10 Aug 2022 08:12:00 +0200 Subject: [PATCH 156/265] Log when repository is marked as corrupted (#89132) --- .../repositories/blobstore/BlobStoreRepository.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 69c01a51b337b..8bc13a5cbef25 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -2026,6 +2026,7 @@ private static String previousWriterMessage(@Nullable Tuple previo private void markRepoCorrupted(long corruptedGeneration, Exception originalException, ActionListener listener) { assert corruptedGeneration != RepositoryData.UNKNOWN_REPO_GEN; assert bestEffortConsistency == false; + logger.warn(() -> "Marking repository [" + metadata.name() + "] as corrupted", originalException); submitUnbatchedTask( "mark repository corrupted [" + metadata.name() + "][" + corruptedGeneration + "]", new ClusterStateUpdateTask() { From 546a2e28989a2f356891765205d406c4f2babea9 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 10 Aug 2022 08:17:55 +0100 Subject: [PATCH 157/265] Add note on per-segment field name overhead (#89152) We encountered a case where a substantial 
fraction of the heap usage was due to per-segment-per-field `FieldInfo` objects, particularly `FieldInfo#name`. This commit adds a note to the sizing docs about this overhead. --- docs/reference/how-to/size-your-shards.asciidoc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index a1236a11525f9..d06891278336b 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -55,14 +55,14 @@ thread pool>>. This can result in low throughput and slow search speeds. [discrete] [[each-shard-has-overhead]] -==== Each index, shard and field has overhead +==== Each index, shard, segment and field has overhead Every index and every shard requires some memory and CPU resources. In most cases, a small set of large shards uses fewer resources than many small shards. Segments play a big role in a shard's resource usage. Most shards contain -several segments, which store its index data. {es} keeps segment metadata in -JVM heap memory so it can be quickly retrieved for searches. As a shard grows, +several segments, which store its index data. {es} keeps some segment metadata +in heap memory so it can be quickly retrieved for searches. As a shard grows, its segments are <> into fewer, larger segments. This decreases the number of segments, which means less metadata is kept in heap memory. @@ -72,6 +72,13 @@ space. By default {es} will automatically create a mapping for every field in every document it indexes, but you can switch off this behaviour to <>. +Moreover every segment requires a small amount of heap memory for each mapped +field. This per-segment-per-field heap overhead includes a copy of the field +name, encoded using ISO-8859-1 if applicable or UTF-16 otherwise. Usually this +is not noticeable, but you may need to account for this overhead if your shards +have high segment counts and the corresponding mappings contain high field +counts and/or very long field names. + [discrete] [[shard-auto-balance]] ==== {es} automatically balances shards within a data tier From ceffaf9aad06ceeecaf55c8f1ef86df9661e0082 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 10 Aug 2022 12:39:24 +0100 Subject: [PATCH 158/265] Improve rejection of ambiguous voting config name (#89239) Today if there are multiple nodes with the same name then `POST /_cluster/voting_config_exclusions?node_names=ambiguous-name` will return a `500 Internal Server Error` and a mysterious message. This commit changes the behaviour to throw an `IllegalArgumentException` (i.e. `400 Bad Request`) along with a more useful message describing the problem. 
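For context, the fix relies on the three-argument form of `Collectors.toMap`, whose
merge function is invoked whenever two entries map to the same key. The following is a
minimal standalone sketch of that technique; the `UniqueNamesSketch` class and `Node`
record are hypothetical stand-ins for illustration only and are not part of this patch:

    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    class UniqueNamesSketch {
        // Hypothetical stand-in for a node type; only the fields needed here.
        record Node(String id, String name) {}

        // Builds a name-to-node map, failing fast with a descriptive
        // IllegalArgumentException when two nodes share a name.
        static Map<String, Node> byUniqueName(List<Node> nodes) {
            return nodes.stream().collect(Collectors.toMap(Node::name, Function.identity(), (n1, n2) -> {
                throw new IllegalArgumentException(
                    "node name [" + n1.name() + "] is ambiguous, matching [" + n1.id() + "] and [" + n2.id() + "]; specify node ID instead"
                );
            }));
        }
    }

Without a merge function, `Collectors.toMap` reports duplicate keys with a generic
`IllegalStateException`, which is the kind of failure that previously surfaced as the
500 response and unhelpful message described above.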
--- docs/changelog/89239.yaml | 5 +++ .../AddVotingConfigExclusionsRequest.java | 11 +++++- ...AddVotingConfigExclusionsRequestTests.java | 36 +++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/89239.yaml diff --git a/docs/changelog/89239.yaml b/docs/changelog/89239.yaml new file mode 100644 index 0000000000000..be04bd916cf89 --- /dev/null +++ b/docs/changelog/89239.yaml @@ -0,0 +1,5 @@ +pr: 89239 +summary: Improve rejection of ambiguous voting config name +area: Cluster Coordination +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index e9473228ba1f4..75d512683e318 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -102,7 +102,16 @@ Set resolveVotingConfigExclusions(ClusterState currentSta } else { assert nodeNames.length > 0; Map existingNodes = allNodes.stream() - .collect(Collectors.toMap(DiscoveryNode::getName, Function.identity())); + .collect(Collectors.toMap(DiscoveryNode::getName, Function.identity(), (n1, n2) -> { + throw new IllegalArgumentException( + org.elasticsearch.core.Strings.format( + "node name [%s] is ambiguous, matching [%s] and [%s]; specify node ID instead", + n1.getName(), + n1.descriptionWithoutAttributes(), + n2.descriptionWithoutAttributes() + ) + ); + })); for (String nodeName : nodeNames) { if (existingNodes.containsKey(nodeName)) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java index 5023fb131a1aa..5529c52044624 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java @@ -25,8 +25,10 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class AddVotingConfigExclusionsRequestTests extends ESTestCase { @@ -235,6 +237,40 @@ public void testResolveByNodeNames() { ); } + public void testResolveAmbiguousName() { + final DiscoveryNode node1 = new DiscoveryNode( + "ambiguous-name", + "nodeId1", + buildNewFakeTransportAddress(), + emptyMap(), + Set.of(DiscoveryNodeRole.MASTER_ROLE), + Version.CURRENT + ); + + final DiscoveryNode node2 = new DiscoveryNode( + "ambiguous-name", + "nodeId2", + buildNewFakeTransportAddress(), + emptyMap(), + Set.of(DiscoveryNodeRole.MASTER_ROLE), + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(new ClusterName("cluster")) + .nodes(new Builder().add(node1).add(node2).localNodeId(node1.getId())) + .build(); + + final var request = new AddVotingConfigExclusionsRequest("ambiguous-name"); + assertThat( + expectThrows(IllegalArgumentException.class, () -> 
request.resolveVotingConfigExclusions(clusterState)).getMessage(), + allOf( + containsString("node name [ambiguous-name] is ambiguous"), + containsString(node1.descriptionWithoutAttributes()), + containsString(node2.descriptionWithoutAttributes()) + ) + ); + } + public void testResolveRemoveExistingVotingConfigExclusions() { final DiscoveryNode node1 = new DiscoveryNode( "nodeName1", From a2785944002231e6cb2bc2d8dbe7b40b29104303 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 10 Aug 2022 15:08:55 +0300 Subject: [PATCH 159/265] [ML] Move method to compute current memory scale into NativeMemoryCapacity (#89240) This commit moves method `currentScale` from `MlAutoscalingDeciderService` to `NativeMemoryCapacity` as it allows for easier reuse without coupling to the autoscaling service. --- .../MlAutoscalingDeciderService.java | 35 +-------------- .../ml/autoscaling/NativeMemoryCapacity.java | 44 +++++++++++++++---- .../xpack/ml/job/JobNodeSelector.java | 3 +- 3 files changed, 38 insertions(+), 44 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index bf76d20ab5ad8..fb9d352d2504a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -40,7 +40,6 @@ import java.time.Duration; import java.time.Instant; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Comparator; import java.util.HashMap; @@ -296,40 +295,8 @@ private boolean newScaleDownCheck() { return scaleDownDetected == NO_SCALE_DOWN_POSSIBLE; } - /** - * The "current scale" is defined as the possible capacity of the current cluster, not - * the sum of what's actually in use. - * @return A {@link NativeMemoryCapacity} object where the "tier requirement" is the sum of - * the ML native memory allowance (less per-node overhead) on all ML nodes, the - * "node requirement" is the highest ML native memory allowance (less per-node overhead) - * across all ML nodes and the JVM size is the biggest JVM size across all ML nodes. 
- */ - public static NativeMemoryCapacity currentScale( - final List machineLearningNodes, - int maxMachineMemoryPercent, - boolean useAuto - ) { - long[] mlMemory = machineLearningNodes.stream() - .mapToLong(node -> NativeMemoryCalculator.allowedBytesForMl(node, maxMachineMemoryPercent, useAuto).orElse(0L)) - // NativeMemoryCapacity is in terms of ML memory excluding the per-node overhead - .map(mem -> Math.max(mem - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), 0L)) - .toArray(); - - return new NativeMemoryCapacity( - Arrays.stream(mlMemory).sum(), - Arrays.stream(mlMemory).max().orElse(0L), - // We assume that JVM size is universal, at least, the largest JVM indicates the largest node - machineLearningNodes.stream() - .map(MlAutoscalingDeciderService::getNodeJvmSize) - .filter(OptionalLong::isPresent) - .map(OptionalLong::getAsLong) - .max(Long::compare) - .orElse(null) - ); - } - NativeMemoryCapacity currentScale(final List machineLearningNodes) { - return currentScale(machineLearningNodes, maxMachineMemoryPercent, useAuto); + return NativeMemoryCapacity.currentScale(machineLearningNodes, maxMachineMemoryPercent, useAuto); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NativeMemoryCapacity.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NativeMemoryCapacity.java index 2a200c6e7b1d4..9662c935f8301 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NativeMemoryCapacity.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NativeMemoryCapacity.java @@ -9,11 +9,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingCapacity; import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; +import java.util.Arrays; +import java.util.List; import java.util.Objects; +import java.util.OptionalLong; import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD; @@ -27,14 +31,6 @@ public class NativeMemoryCapacity { private static final Logger logger = LogManager.getLogger(NativeMemoryCapacity.class); - static NativeMemoryCapacity from(NativeMemoryCapacity capacity) { - return new NativeMemoryCapacity( - capacity.tierMlNativeMemoryRequirementExcludingOverhead, - capacity.nodeMlNativeMemoryRequirementExcludingOverhead, - capacity.jvmSize - ); - } - private final long tierMlNativeMemoryRequirementExcludingOverhead; private final long nodeMlNativeMemoryRequirementExcludingOverhead; private final Long jvmSize; @@ -269,4 +265,36 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(tierMlNativeMemoryRequirementExcludingOverhead, nodeMlNativeMemoryRequirementExcludingOverhead, jvmSize); } + + /** + * The "current scale" is defined as the possible capacity of the current cluster, not + * the sum of what's actually in use. + * @return A {@link NativeMemoryCapacity} object where the "tier requirement" is the sum of + * the ML native memory allowance (less per-node overhead) on all ML nodes, the + * "node requirement" is the highest ML native memory allowance (less per-node overhead) + * across all ML nodes and the JVM size is the biggest JVM size across all ML nodes. 
+ */ + public static NativeMemoryCapacity currentScale( + final List machineLearningNodes, + int maxMachineMemoryPercent, + boolean useAuto + ) { + long[] mlMemory = machineLearningNodes.stream() + .mapToLong(node -> NativeMemoryCalculator.allowedBytesForMl(node, maxMachineMemoryPercent, useAuto).orElse(0L)) + // NativeMemoryCapacity is in terms of ML memory excluding the per-node overhead + .map(mem -> Math.max(mem - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), 0L)) + .toArray(); + + return new NativeMemoryCapacity( + Arrays.stream(mlMemory).sum(), + Arrays.stream(mlMemory).max().orElse(0L), + // We assume that JVM size is universal, at least, the largest JVM indicates the largest node + machineLearningNodes.stream() + .map(MlAutoscalingDeciderService::getNodeJvmSize) + .filter(OptionalLong::isPresent) + .map(OptionalLong::getAsLong) + .max(Long::compare) + .orElse(null) + ); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java index 9326af7aa6785..2997af2e5a1a8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingDeciderService; import org.elasticsearch.xpack.ml.autoscaling.NativeMemoryCapacity; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; @@ -107,7 +106,7 @@ public Tuple currentCapacityAndMaxFreeMemory( int maxOpenJobs ) { List capableNodes = candidateNodes.stream().filter(n -> this.nodeFilter.apply(n) == null).toList(); - NativeMemoryCapacity currentCapacityForMl = MlAutoscalingDeciderService.currentScale( + NativeMemoryCapacity currentCapacityForMl = NativeMemoryCapacity.currentScale( capableNodes, maxMachineMemoryPercent, useAutoMemoryPercentage From 341f3b717aec5028d7d7b57b19753061bcd22583 Mon Sep 17 00:00:00 2001 From: GabyCT Date: Wed, 10 Aug 2022 09:40:01 -0500 Subject: [PATCH 160/265] [DOCS] Update URLs in plugin document (#89221) This PR updates the URLs for several references that are being used in the plugin document. Signed-off-by: Gabriela Cervantes --- docs/plugins/authors.asciidoc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index 8ffa6a0f5fe5a..7c1f285cb9e4b 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -3,12 +3,12 @@ :plugin-properties-files: {elasticsearch-root}/build-tools/src/main/resources -The Elasticsearch repository contains https://github.com/elastic/elasticsearch/tree/master/plugins/examples[examples of plugins]. Some of these include: +The Elasticsearch repository contains {es-repo}tree/main/plugins/examples[examples of plugins]. 
Some of these include: -* a plugin with https://github.com/elastic/elasticsearch/tree/master/plugins/examples/custom-settings[custom settings] -* adding https://github.com/elastic/elasticsearch/tree/master/plugins/examples/rest-handler[custom rest endpoints] -* adding a https://github.com/elastic/elasticsearch/tree/master/plugins/examples/rescore[custom rescorer] -* a script https://github.com/elastic/elasticsearch/tree/master/plugins/examples/script-expert-scoring[implemented in Java] +* a plugin with {es-repo}tree/main/plugins/examples/custom-settings[custom settings] +* adding {es-repo}tree/main/plugins/examples/rest-handler[custom rest endpoints] +* adding a {es-repo}tree/main/plugins/examples/rescore[custom rescorer] +* a script {es-repo}tree/main/plugins/examples/script-expert-scoring[implemented in Java] These examples provide the bare bones needed to get started. For more information about how to write a plugin, we recommend looking at the plugins From 399a8ac283bb5819016c3791f2a9f40c723b9e4c Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Wed, 10 Aug 2022 18:04:22 +0300 Subject: [PATCH 161/265] Add TransportHealthNodeAction (#89127) --- .../elasticsearch/ElasticsearchException.java | 9 +- .../HealthNodeNotDiscoveredException.java | 35 ++ .../action/TransportHealthNodeAction.java | 123 ++++++ .../health/node/selection/HealthNode.java | 10 + .../node/selection/HealthNodeTaskParams.java | 6 +- .../ExceptionSerializationTests.java | 2 + .../TransportHealthNodeActionTests.java | 378 ++++++++++++++++++ .../node/selection/HealthNodeTests.java | 77 ++++ .../ClusterStateCreationUtils.java | 38 +- 9 files changed, 673 insertions(+), 5 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/health/node/action/HealthNodeNotDiscoveredException.java create mode 100644 server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java create mode 100644 server/src/test/java/org/elasticsearch/health/node/action/TransportHealthNodeActionTests.java create mode 100644 server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 91d93213c454c..77c2abac8304f 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -17,6 +17,7 @@ import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -721,7 +722,7 @@ public static T writeStackTraces(T throwable, StreamOutput /** * This is the list of Exceptions Elasticsearch can throw over the wire or save into a corruption marker. Each value in the enum is a * single exception tying the Class to an id for use of the encode side and the id back to a constructor for use on the decode side. As - * such its ok if the exceptions to change names so long as their constructor can still read the exception. Each exception is listed + * such it's ok if the exceptions to change names so long as their constructor can still read the exception. Each exception is listed * in id order below. 
If you want to remove an exception leave a tombstone comment and mark the id as null in * ExceptionSerializationTests.testIds.ids. */ @@ -1571,6 +1572,12 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.snapshots.SnapshotNameAlreadyInUseException::new, 165, Version.V_8_2_0 + ), + HEALTH_NODE_NOT_DISCOVERED_EXCEPTION( + HealthNodeNotDiscoveredException.class, + HealthNodeNotDiscoveredException::new, + 166, + Version.V_8_5_0 ); final Class exceptionClass; diff --git a/server/src/main/java/org/elasticsearch/health/node/action/HealthNodeNotDiscoveredException.java b/server/src/main/java/org/elasticsearch/health/node/action/HealthNodeNotDiscoveredException.java new file mode 100644 index 0000000000000..90f7fe0a014a4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/health/node/action/HealthNodeNotDiscoveredException.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.health.node.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; + +/** + * Exception which indicates that no health node is selected in this cluster, aka the + * health node persistent task is not assigned. + */ +public class HealthNodeNotDiscoveredException extends ElasticsearchException { + + public HealthNodeNotDiscoveredException(String message) { + super(message); + } + + public HealthNodeNotDiscoveredException(StreamInput in) throws IOException { + super(in); + } + + @Override + public RestStatus status() { + return RestStatus.SERVICE_UNAVAILABLE; + } +} diff --git a/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java b/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java new file mode 100644 index 0000000000000..2c2070ca31b20 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.health.node.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.health.node.selection.HealthNode; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; + +import static org.elasticsearch.core.Strings.format; + +/** + * A base class for operations that need to be performed on the health node. + */ +public abstract class TransportHealthNodeAction extends + HandledTransportAction { + + private static final Logger logger = LogManager.getLogger(TransportHealthNodeAction.class); + + protected final TransportService transportService; + protected final ClusterService clusterService; + protected final ThreadPool threadPool; + protected final String executor; + + private final Writeable.Reader responseReader; + + protected TransportHealthNodeAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + Writeable.Reader response, + String executor + ) { + super(actionName, true, transportService, actionFilters, request); + this.transportService = transportService; + this.clusterService = clusterService; + this.threadPool = threadPool; + this.executor = executor; + this.responseReader = response; + } + + protected abstract void healthOperation(Task task, Request request, ClusterState state, ActionListener listener) + throws Exception; + + @Override + protected void doExecute(Task task, final Request request, ActionListener listener) { + ClusterState state = clusterService.state(); + logger.trace("starting to process request [{}] with cluster state version [{}]", request, state.version()); + if (isTaskCancelled(task)) { + listener.onFailure(new TaskCancelledException("Task was cancelled")); + return; + } + try { + ClusterState clusterState = clusterService.state(); + DiscoveryNode healthNode = HealthNode.findHealthNode(clusterState); + DiscoveryNode localNode = clusterState.nodes().getLocalNode(); + if (healthNode == null) { + listener.onFailure(new HealthNodeNotDiscoveredException("Health node was null")); + } else if (localNode.getId().equals(healthNode.getId())) { + threadPool.executor(executor).execute(() -> { + try { + if (isTaskCancelled(task)) { + listener.onFailure(new TaskCancelledException("Task was cancelled")); + } else { + healthOperation(task, request, clusterState, listener); + } + } catch (Exception e) { + listener.onFailure(e); + } + }); + } else { + logger.trace("forwarding request [{}] to health node [{}]", actionName, healthNode); + ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>(listener, 
responseReader) { + @Override + public void handleException(final TransportException exception) { + logger.trace( + () -> format("failure when forwarding request [%s] to health node [%s]", actionName, healthNode), + exception + ); + listener.onFailure(exception); + } + }; + if (task != null) { + transportService.sendChildRequest(healthNode, actionName, request, task, TransportRequestOptions.EMPTY, handler); + } else { + transportService.sendRequest(healthNode, actionName, request, handler); + } + } + } catch (Exception e) { + logger.trace(() -> format("Failed to route/execute health node action %s", actionName), e); + listener.onFailure(e); + } + } + + private boolean isTaskCancelled(Task task) { + return (task instanceof CancellableTask t) && t.isCancelled(); + } +} diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java index 4a5fdfea1df7f..7ed66ec91dbac 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java @@ -9,6 +9,7 @@ package org.elasticsearch.health.node.selection; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.core.Nullable; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -43,4 +44,13 @@ public static PersistentTasksCustomMetadata.PersistentTask findTask(ClusterSt PersistentTasksCustomMetadata taskMetadata = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); return taskMetadata == null ? null : taskMetadata.getTask(TASK_NAME); } + + @Nullable + public static DiscoveryNode findHealthNode(ClusterState clusterState) { + PersistentTasksCustomMetadata.PersistentTask task = findTask(clusterState); + if (task == null || task.isAssigned() == false) { + return null; + } + return clusterState.nodes().get(task.getAssignment().getExecutorNode()); + } } diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java index 186296492991a..95e48fb9244a3 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java @@ -23,15 +23,15 @@ /** * Encapsulates the parameters needed to start the health node task, currently no parameters are required. 
*/ -class HealthNodeTaskParams implements PersistentTaskParams { +public class HealthNodeTaskParams implements PersistentTaskParams { - private static final HealthNodeTaskParams INSTANCE = new HealthNodeTaskParams(); + public static final HealthNodeTaskParams INSTANCE = new HealthNodeTaskParams(); public static final ObjectParser PARSER = new ObjectParser<>(TASK_NAME, true, () -> INSTANCE); HealthNodeTaskParams() {} - HealthNodeTaskParams(StreamInput in) {} + HealthNodeTaskParams(StreamInput ignored) {} @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 5381278f21f61..d59b18bf3b1f2 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.Tuple; import org.elasticsearch.env.ShardLockObtainFailedException; +import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.query.QueryShardException; @@ -829,6 +830,7 @@ public void testIds() { ids.put(163, RepositoryConflictException.class); ids.put(164, VersionConflictException.class); ids.put(165, SnapshotNameAlreadyInUseException.class); + ids.put(166, HealthNodeNotDiscoveredException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/health/node/action/TransportHealthNodeActionTests.java b/server/src/test/java/org/elasticsearch/health/node/action/TransportHealthNodeActionTests.java new file mode 100644 index 0000000000000..730e93eab2a96 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/health/node/action/TransportHealthNodeActionTests.java @@ -0,0 +1,378 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.health.node.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class TransportHealthNodeActionTests extends ESTestCase { + private static ThreadPool threadPool; + + private ClusterService clusterService; + private TransportService transportService; + private CapturingTransport transport; + private DiscoveryNode localNode; + private DiscoveryNode remoteNode; + private DiscoveryNode[] allNodes; + private TaskManager taskManager; + + @BeforeClass + public static void beforeClass() { + threadPool = new TestThreadPool("TransportHealthNodeActionTests"); + } + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); + transport = new CapturingTransport(); + clusterService = createClusterService(threadPool); + transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + localNode = new DiscoveryNode( + "local_node", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + remoteNode = new DiscoveryNode( + "remote_node", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + 
Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + allNodes = new DiscoveryNode[] { localNode, remoteNode }; + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + + public static class Request extends ActionRequest { + + Request() {} + + Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + } + + static class Response extends ActionResponse { + private long identity = randomLong(); + + Response() {} + + Response(StreamInput in) throws IOException { + super(in); + identity = in.readLong(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return identity == response.identity; + } + + @Override + public int hashCode() { + return Objects.hash(identity); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(identity); + } + } + + class Action extends TransportHealthNodeAction { + Action(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { + this(actionName, transportService, clusterService, threadPool, ThreadPool.Names.SAME); + } + + Action( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + String executor + ) { + super( + actionName, + transportService, + clusterService, + threadPool, + new ActionFilters(new HashSet<>()), + Request::new, + Response::new, + executor + ); + } + + @Override + protected void doExecute(Task task, final Request request, ActionListener listener) { + // remove unneeded threading by wrapping listener with SAME to prevent super.doExecute from wrapping it with LISTENER + super.doExecute(task, request, new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.SAME, listener, false)); + } + + @Override + protected void healthOperation(Task task, Request request, ClusterState state, ActionListener listener) { + listener.onResponse(new Response()); + } + } + + class WaitForSignalAction extends Action { + private final CountDownLatch countDownLatch; + + WaitForSignalAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + CountDownLatch countDownLatch + ) { + super(actionName, transportService, clusterService, threadPool, ThreadPool.Names.SAME); + this.countDownLatch = countDownLatch; + } + + @Override + protected void doExecute(Task task, final Request request, ActionListener listener) { + try { + countDownLatch.await(); + } catch (InterruptedException e) { + fail("Something went wrong while waiting for the latch"); + } + super.doExecute(task, request, listener); + } + } + + class HealthOperationWithExceptionAction extends Action { + + HealthOperationWithExceptionAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool + ) { + super(actionName, transportService, clusterService, threadPool); + } + + 
@Override + protected void healthOperation(Task task, Request request, ClusterState state, ActionListener listener) { + throw new RuntimeException("Simulated"); + } + } + + public void testLocalHealthNode() throws ExecutionException, InterruptedException { + final boolean healthOperationFailure = randomBoolean(); + + Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + + final Exception exception = new Exception(); + final Response response = new Response(); + + setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, localNode, allNodes)); + + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool) { + @Override + protected void healthOperation(Task task, Request request, ClusterState state, ActionListener listener) { + if (healthOperationFailure) { + listener.onFailure(exception); + } else { + listener.onResponse(response); + } + } + }, null, request, listener); + assertTrue(listener.isDone()); + + if (healthOperationFailure) { + try { + listener.get(); + fail("Expected exception but returned proper result"); + } catch (ExecutionException ex) { + assertThat(ex.getCause(), equalTo(exception)); + } + } else { + assertThat(listener.get(), equalTo(response)); + } + } + + public void testHealthNodeNotAvailable() throws InterruptedException { + Request request = new Request(); + setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); + PlainActionFuture listener = new PlainActionFuture<>(); + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool), null, request, listener); + assertTrue(listener.isDone()); + try { + listener.get(); + fail("NoHealthNodeSelectedException should be thrown"); + } catch (ExecutionException ex) { + assertThat(ex.getCause(), instanceOf(HealthNodeNotDiscoveredException.class)); + } + } + + public void testDelegateToHealthNodeWithoutParentTask() throws ExecutionException, InterruptedException { + Request request = new Request(); + setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, remoteNode, allNodes)); + + PlainActionFuture listener = new PlainActionFuture<>(); + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool), null, request, listener); + + assertThat(transport.capturedRequests().length, equalTo(1)); + CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; + assertThat(capturedRequest.node(), equalTo(remoteNode)); + assertThat(capturedRequest.request(), equalTo(request)); + assertThat(capturedRequest.action(), equalTo("internal:testAction")); + + Response response = new Response(); + transport.handleResponse(capturedRequest.requestId(), response); + assertTrue(listener.isDone()); + assertThat(listener.get(), equalTo(response)); + } + + public void testDelegateToHealthNodeWithParentTask() throws ExecutionException, InterruptedException { + Request request = new Request(); + setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, remoteNode, allNodes)); + + PlainActionFuture listener = new PlainActionFuture<>(); + final CancellableTask task = (CancellableTask) taskManager.register("type", "internal:testAction", request); + ActionTestUtils.execute(new Action("internal:testAction", transportService, clusterService, threadPool), task, request, listener); + + assertThat(transport.capturedRequests().length, equalTo(1)); + 
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; + assertThat(capturedRequest.node(), equalTo(remoteNode)); + assertThat(capturedRequest.request(), equalTo(request)); + assertThat(capturedRequest.action(), equalTo("internal:testAction")); + + Response response = new Response(); + transport.handleResponse(capturedRequest.requestId(), response); + assertTrue(listener.isDone()); + assertThat(listener.get(), equalTo(response)); + } + + public void testHealthNodeOperationWithException() throws InterruptedException { + Request request = new Request(); + setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, localNode, allNodes)); + PlainActionFuture listener = new PlainActionFuture<>(); + ActionTestUtils.execute( + new HealthOperationWithExceptionAction("internal:testAction", transportService, clusterService, threadPool), + null, + request, + listener + ); + assertTrue(listener.isDone()); + try { + listener.get(); + fail("A simulated RuntimeException should be thrown"); + } catch (ExecutionException ex) { + assertThat(ex.getCause().getMessage(), equalTo("Simulated")); + } + } + + public void testTaskCancellation() { + Request request = new Request(); + final CancellableTask task = (CancellableTask) taskManager.register("type", "internal:testAction", request); + + PlainActionFuture listener = new PlainActionFuture<>(); + CountDownLatch countDownLatch = new CountDownLatch(1); + + threadPool.executor(ThreadPool.Names.MANAGEMENT) + .submit( + () -> ActionTestUtils.execute( + new WaitForSignalAction("internal:testAction", transportService, clusterService, threadPool, countDownLatch), + task, + request, + listener + ) + ); + + taskManager.cancel(task, "", () -> {}); + assertThat(task.isCancelled(), equalTo(true)); + + countDownLatch.countDown(); + + expectThrows(TaskCancelledException.class, listener::actionGet); + } +} diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java new file mode 100644 index 0000000000000..720a649e863b9 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.health.node.selection; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; + +import static org.elasticsearch.persistent.PersistentTasksExecutor.NO_NODE_FOUND; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class HealthNodeTests extends ESTestCase { + + private final DiscoveryNode node1 = new DiscoveryNode( + "node_1", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + private final DiscoveryNode node2 = new DiscoveryNode( + "node_2", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + private final DiscoveryNode[] allNodes = new DiscoveryNode[] { node1, node2 }; + + public void testFindTask() { + ClusterState state = ClusterStateCreationUtils.state(node1, node1, node1, allNodes); + assertThat(HealthNode.findTask(state), notNullValue()); + } + + public void testFindNoTask() { + ClusterState state = ClusterStateCreationUtils.state(node1, node1, allNodes); + assertThat(HealthNode.findTask(state), nullValue()); + } + + public void testFindHealthNode() { + ClusterState state = ClusterStateCreationUtils.state(node1, node1, node1, allNodes); + assertThat(HealthNode.findHealthNode(state), equalTo(node1)); + } + + public void testFindHealthNodeNoTask() { + ClusterState state = ClusterStateCreationUtils.state(node1, node1, allNodes); + assertThat(HealthNode.findHealthNode(state), nullValue()); + } + + public void testfindHealthNodeNoAssignment() { + PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + tasks.addTask(HealthNode.TASK_NAME, HealthNode.TASK_NAME, HealthNodeTaskParams.INSTANCE, NO_NODE_FOUND); + ClusterState state = ClusterStateCreationUtils.state(node1, node1, allNodes) + .copyAndUpdateMetadata(b -> b.putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build())); + assertThat(HealthNode.findHealthNode(state), nullValue()); + } + + public void testFindHealthNodeMissingNode() { + ClusterState state = ClusterStateCreationUtils.state(node1, node1); + assertThat(HealthNode.findHealthNode(state), nullValue()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 60611f1ac9735..bed3271ca49d5 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -26,8 +26,11 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.health.node.selection.HealthNode; +import org.elasticsearch.health.node.selection.HealthNodeTaskParams; import 
org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -41,6 +44,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.ESTestCase.randomIntBetween; @@ -424,6 +428,24 @@ public static ClusterState stateWithNoShard() { * @return cluster state */ public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) { + return state(localNode, masterNode, null, allNodes); + } + + /** + * Creates a cluster state where local node, master and health node can be specified + * + * @param localNode node in allNodes that is the local node + * @param masterNode node in allNodes that is the master node. Can be null if no master exists + * @param healthNode node in allNodes that is the health node. Can be null if no health node exists + * @param allNodes all nodes in the cluster + * @return cluster state + */ + public static ClusterState state( + DiscoveryNode localNode, + DiscoveryNode masterNode, + DiscoveryNode healthNode, + DiscoveryNode... allNodes + ) { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); for (DiscoveryNode node : allNodes) { discoBuilder.add(node); @@ -436,7 +458,11 @@ public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNo ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); state.nodes(discoBuilder); - state.metadata(Metadata.builder().generateClusterUuidIfNeeded()); + Metadata.Builder metadataBuilder = Metadata.builder().generateClusterUuidIfNeeded(); + if (healthNode != null) { + addHealthNode(metadataBuilder, healthNode); + } + state.metadata(metadataBuilder); return state.build(); } @@ -455,4 +481,14 @@ private static String selectAndRemove(Set strings) { strings.remove(selection); return selection; } + + private static Metadata.Builder addHealthNode(Metadata.Builder metadataBuilder, DiscoveryNode healthNode) { + PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksCustomMetadata.Assignment assignment = new PersistentTasksCustomMetadata.Assignment( + healthNode.getId(), + randomAlphaOfLength(10) + ); + tasks.addTask(HealthNode.TASK_NAME, HealthNode.TASK_NAME, HealthNodeTaskParams.INSTANCE, assignment); + return metadataBuilder.putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + } } From c0019a3ff64111369fdbb799424adacdf2c67ac4 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 10 Aug 2022 09:04:40 -0700 Subject: [PATCH 162/265] Ensure APM module is always installed in release test clusters (#89223) --- distribution/build.gradle | 2 +- qa/apm/build.gradle | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/distribution/build.gradle b/distribution/build.gradle index 158e7c70091a7..301ca89438b97 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -177,7 +177,7 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { } 
distro.copyModule(processDefaultOutputsTaskProvider, module) - if (module.name.startsWith('transport-')) { + if (module.name.startsWith('transport-') || (BuildParams.snapshotBuild == false && module.name == 'apm')) { distro.copyModule(processIntegTestOutputsTaskProvider, module) } diff --git a/qa/apm/build.gradle b/qa/apm/build.gradle index 2750f24572b44..e858d43bcbc0a 100644 --- a/qa/apm/build.gradle +++ b/qa/apm/build.gradle @@ -8,7 +8,9 @@ import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.VersionProperties -import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER; +import org.elasticsearch.gradle.internal.info.BuildParams + +import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.test.fixtures' @@ -21,8 +23,7 @@ dependencies { } dockerCompose { - environment.put 'STACK_VERSION', VersionProperties.elasticsearch - // retainContainersOnStartupFailure = true + environment.put 'STACK_VERSION', BuildParams.snapshotBuild ? VersionProperties.elasticsearch : VersionProperties.elasticsearch + "-SNAPSHOT" } elasticsearch_distributions { From 841ac8e43a01914d223d66afd403b2b49a83105c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yoann=20Rodi=C3=A8re?= Date: Wed, 10 Aug 2022 19:19:15 +0200 Subject: [PATCH 163/265] Upgrade Apache Commons Logging to 1.2 (#85745) * Upgrade to Apache Commons Logging 1.2 (#40305) * Clarify that Apache HTTP/commons-* dependencies are not just for tests --- build-tools-internal/version.properties | 12 +++++++----- client/rest/build.gradle | 1 - client/sniffer/build.gradle | 1 - docs/changelog/85745.yaml | 6 ++++++ gradle/verification-metadata.xml | 5 ----- plugins/repository-hdfs/build.gradle | 2 +- test/framework/build.gradle | 1 - 7 files changed, 14 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/85745.yaml diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 52379cf3fb557..5c2a4389e124a 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -30,15 +30,17 @@ bouncycastle=1.64 # used by security and idp (need to be in sync due to cross-dependency in testing) opensaml = 4.0.1 -# test dependencies -randomizedrunner = 2.8.0 -junit = 4.12 -junit5 = 5.7.1 +# client dependencies httpclient = 4.5.13 httpcore = 4.4.13 httpasyncclient = 4.1.5 -commonslogging = 1.1.3 +commonslogging = 1.2 commonscodec = 1.14 + +# test dependencies +randomizedrunner = 2.8.0 +junit = 4.12 +junit5 = 5.7.1 hamcrest = 2.1 mocksocket = 1.2 diff --git a/client/rest/build.gradle b/client/rest/build.gradle index e92779683dac8..97cf0f4cd65e4 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -86,7 +86,6 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', - 'org.apache.log4j.Category', 'org.apache.log4j.Level', 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 9162c813c746b..0d4dbad62cbbf 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -78,7 +78,6 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', - 'org.apache.log4j.Category', 'org.apache.log4j.Level', 'org.apache.log4j.Logger', 
'org.apache.log4j.Priority', diff --git a/docs/changelog/85745.yaml b/docs/changelog/85745.yaml new file mode 100644 index 0000000000000..00b0da374243c --- /dev/null +++ b/docs/changelog/85745.yaml @@ -0,0 +1,6 @@ +pr: 85745 +summary: Upgrade Apache Commons Logging to 1.2 +area: Client +type: upgrade +issues: + - 40305 diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 366c503becd44..e6eae1a422579 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1034,11 +1034,6 @@ - - - - - diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index d75fda2b69cdb..6cd9053f2d472 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -48,7 +48,7 @@ dependencies { runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop}" implementation "org.apache.hadoop:hadoop-hdfs:${versions.hadoop}" api 'com.google.protobuf:protobuf-java:3.4.0' - api 'commons-logging:commons-logging:1.1.3' + api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api 'commons-cli:commons-cli:1.2' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 5b5674181d3d1..87dee48a74948 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -55,7 +55,6 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', - 'org.apache.log4j.Category', 'org.apache.log4j.Level', 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', From 7cc275d54eda73ab40a996577b97a5cdd5838a31 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 10 Aug 2022 17:30:05 -0700 Subject: [PATCH 164/265] Downgrade known bad containerd version during packaging tests (#89255) Looks like `containerd` has been upgraded on the latest RHEL 9 CI workers. This updated package includes `runc` 1.1.3 which seems to include a bug which can cause a failure when trying to attach a terminal to a running container. This is causing our Docker packaging tests to fail when we attempt to do `docker exec --tty`. For now let's just add a bit to our packaging test execution script to downgrade the package if appropriate. Closes https://github.com/elastic/elasticsearch/issues/89247 --- .ci/scripts/packaging-test.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.ci/scripts/packaging-test.sh b/.ci/scripts/packaging-test.sh index 7ef82371f6ad7..7b0e8f3320bed 100755 --- a/.ci/scripts/packaging-test.sh +++ b/.ci/scripts/packaging-test.sh @@ -43,6 +43,13 @@ if [ -f "/etc/os-release" ] ; then sudo apt-get install -y --allow-downgrades lintian=2.15.0 fi fi + if [[ "$ID" == "rhel" ]] ; then + # Downgrade containerd if necessary to work around runc bug + # See: https://github.com/opencontainers/runc/issues/3551 + if containerd -version | grep -sF 1.6.7; then + sudo yum downgrade -y containerd.io + fi + fi else cat /etc/issue || true fi From 453b5b1e7db4733a8c937c04c0280f81274a97dd Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Thu, 11 Aug 2022 08:46:35 +0200 Subject: [PATCH 165/265] Verify auto follower recover after full leader cluster restart (#89207) Re-setting remote connection is no longer required as cluster port is not changing after restart. 
--- .../org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java index e4900f98bb1b8..b16130387e482 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java @@ -84,11 +84,8 @@ public void testFollowIndex() throws Exception { leaderClient().prepareIndex("index1").setSource("{}", XContentType.JSON).get(); } - cleanRemoteCluster(); getLeaderCluster().fullRestart(); ensureLeaderGreen("index1"); - // Remote connection needs to be re-configured, because all the nodes in leader cluster have been restarted: - setupRemoteCluster(); final long thirdBatchNumDocs = randomIntBetween(10, 200); for (int i = 0; i < thirdBatchNumDocs; i++) { From 0bf31b77fba16ea011e6644daf5f6a036e487db2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 11 Aug 2022 07:48:03 +0100 Subject: [PATCH 166/265] Fix message for stalled shutdown (#89254) Today if a node shutdown is stalled due to unmoveable shards then we say to use the allocation explain API to find details. In fact, since #78727 we include the allocation explanation in the response already so we should tell users just to look at that instead. This commit adjusts the message to address this. --- docs/changelog/89254.yaml | 5 +++++ .../cluster/metadata/ShutdownShardMigrationStatus.java | 4 +++- .../org/elasticsearch/xpack/shutdown/NodeShutdownIT.java | 3 ++- .../xpack/shutdown/TransportGetShutdownStatusAction.java | 6 ++++-- 4 files changed, 14 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/89254.yaml diff --git a/docs/changelog/89254.yaml b/docs/changelog/89254.yaml new file mode 100644 index 0000000000000..d86e6fdc3d856 --- /dev/null +++ b/docs/changelog/89254.yaml @@ -0,0 +1,5 @@ +pr: 89254 +summary: Fix message for stalled shutdown +area: Infra/Node Lifecycle +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index d49d02f582a29..dbde71aeef67c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -24,6 +24,8 @@ public class ShutdownShardMigrationStatus implements Writeable, ToXContentObject { private static final Version ALLOCATION_DECISION_ADDED_VERSION = Version.V_7_16_0; + public static final String NODE_ALLOCATION_DECISION_KEY = "node_allocation_decision"; + private final SingleNodeShutdownMetadata.Status status; private final long shardsRemaining; @Nullable @@ -83,7 +85,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("explanation", explanation); } if (Objects.nonNull(allocationDecision)) { - builder.startObject("node_allocation_decision"); + builder.startObject(NODE_ALLOCATION_DECISION_KEY); { allocationDecision.toXContent(builder, params); } diff --git a/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java 
b/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java index ceda1092f00e6..3d9a38e1ca5da 100644 --- a/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java +++ b/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java @@ -367,9 +367,10 @@ public void testStalledShardMigrationProperlyDetected() throws Exception { ObjectPath.eval("nodes.0.shard_migration.explanation", status), allOf( containsString(indexName), - containsString("cannot move, use the Cluster Allocation Explain API on this shard for details") + containsString("cannot move, see [node_allocation_decision] for details or use the cluster allocation explain API") ) ); + assertThat(ObjectPath.eval("nodes.0.shard_migration.node_allocation_decision", status), notNullValue()); } // Now update the allocation requirements to unblock shard relocation diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index a87b72eb2ffa1..ceb37f7fdcd46 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -49,6 +49,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.ShutdownShardMigrationStatus.NODE_ALLOCATION_DECISION_KEY; import static org.elasticsearch.core.Strings.format; public class TransportGetShutdownStatusAction extends TransportMasterNodeAction< @@ -291,10 +292,11 @@ static ShutdownShardMigrationStatus shardMigrationStatus( SingleNodeShutdownMetadata.Status.STALLED, totalRemainingShards, format( - "shard [%s] [%s] of index [%s] cannot move, use the Cluster Allocation Explain API on this shard for details", + "shard [%s] [%s] of index [%s] cannot move, see [%s] for details or use the cluster allocation explain API", shardRouting.shardId().getId(), shardRouting.primary() ? "primary" : "replica", - shardRouting.index().getName() + shardRouting.index().getName(), + NODE_ALLOCATION_DECISION_KEY ), decision ); From 616fd072787d8d4e398acb28fa1558f8dcbcf90f Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 11 Aug 2022 09:25:14 +0100 Subject: [PATCH 167/265] Drop transport client from ping_schedule docs (#89264) The docs for `transport.ping_schedule` note that the transport client defaults to a 5s ping schedule, but this is no longer relevant. This commit drops this from the docs, and also moves the docs for this setting further down the page to reflect its relative unimportance. --- docs/reference/modules/transport.asciidoc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 3663422a36305..3f5a2ceecad7f 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -62,14 +62,6 @@ Configures the compression scheme for `transport.compress`. The options are upgraded to a version supporting `lz4`, the traffic will be sent uncompressed. Defaults to `lz4`. 
-`transport.ping_schedule`:: -(<>, <>) -Schedule a regular application-level ping message to ensure that transport -connections between nodes are kept alive. Defaults to `5s` in the transport -client and `-1` (disabled) elsewhere. It is preferable to correctly configure -TCP keep-alives instead of using this feature, because TCP keep-alives apply to -all kinds of long-lived connections and not just to transport connections. - `transport.tcp.keep_alive`:: (<>, boolean) Configures the `SO_KEEPALIVE` option for transport sockets, which determines @@ -122,6 +114,14 @@ The size of the TCP send buffer for transport traffic. Defaults to The size of the TCP receive buffer for transport traffic. Defaults to `network.tcp.receive_buffer_size`. +`transport.ping_schedule`:: +(<>, <>) +Configures the time between sending application-level pings on all transport +connections to promptly detect when a transport connection has failed. Defaults +to `-1` meaning that application-level pings are not sent. You should use TCP +keepalives (see `transport.tcp.keep_alive`) instead of application-level pings +wherever possible. + [[transport-profiles]] ===== Transport profiles From 2d3bcc483d5f77d2df0b51cbf9cc99dd22bfcc9b Mon Sep 17 00:00:00 2001 From: Luca Belluccini Date: Thu, 11 Aug 2022 09:43:53 +0100 Subject: [PATCH 168/265] [DOCS] Warn only one date format is added to the field date formats when using dynamic_date_formats (#88915) * [DOCS] Warn only one date format is added to the field date formats When using multiple options in `dynamic_date_formats`, only one of the formats of the first document having a date matching one of the date formats provided will be used. E.g. ``` PUT my-index-000001 { "mappings": { "dynamic_date_formats": [ "yyyy/MM", "MM/dd/yyyy"] } } PUT my-index-000001/_doc/1 { "create_date": "09/25/2015" } ``` The generated mappings will be: ``` "mappings": { "dynamic_date_formats": [ "yyyy/MM", "MM/dd/yyyy" ], "properties": { "create_date": { "type": "date", "format": "MM/dd/yyyy" } } }, ``` Indexing a document with `2015/12` would lead to the `format` `"yyyy/MM"` being used for the `create_date`. This can be misleading especially if the user is using multiple date formats on the same field. The first document will determine the format of the `date` field being detected. Maybe we should provide an additional example, such as: ``` PUT my-index-000001 { "mappings": { "dynamic_date_formats": [ "yyyy/MM||MM/dd/yyyy"] } } ``` My wording is not great, so feel free to amend/edit. * Update docs/reference/mapping/dynamic/field-mapping.asciidoc Reword and add code example * Turned discussion of the two syntaxes into an admonition * Fix failing tests Co-authored-by: Abdon Pijpelink --- .../mapping/dynamic/field-mapping.asciidoc | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/docs/reference/mapping/dynamic/field-mapping.asciidoc b/docs/reference/mapping/dynamic/field-mapping.asciidoc index 10c4dc5d0742b..45486b70334d4 100644 --- a/docs/reference/mapping/dynamic/field-mapping.asciidoc +++ b/docs/reference/mapping/dynamic/field-mapping.asciidoc @@ -114,6 +114,107 @@ PUT my-index-000001/_doc/1 } -------------------------------------------------- +[NOTE] +==== +There is a difference between configuring an array of date patterns and +configuring multiple patterns in a single string separated by `||`. 
When you +configure an array of date patterns, the pattern that matches the date in the +first document with an unmapped date field will determine the mapping of that +field: + +[source,console] +-------------------------------------------------- +PUT my-index-000001 +{ + "mappings": { + "dynamic_date_formats": [ "yyyy/MM", "MM/dd/yyyy"] + } +} + +PUT my-index-000001/_doc/1 +{ + "create_date": "09/25/2015" +} +-------------------------------------------------- + +The resulting mapping will be: + +//// +[source,console] +---- +GET my-index-000001/_mapping +---- +//TEST[continued] +//// + +[source,console-result] +-------------------------------------------------- +{ + "my-index-000001": { + "mappings": { + "dynamic_date_formats": [ + "yyyy/MM", + "MM/dd/yyyy" + ], + "properties": { + "create_date": { + "type": "date", + "format": "MM/dd/yyyy" + } + } + } + } +} +-------------------------------------------------- + +Configuring multiple patterns in a single string separated by `||` results in a +mapping that supports any of the date formats. This enables you to index +documents that use different formats: + +[source,console] +-------------------------------------------------- +PUT my-index-000001 +{ + "mappings": { + "dynamic_date_formats": [ "yyyy/MM||MM/dd/yyyy"] + } +} + +PUT my-index-000001/_doc/1 +{ + "create_date": "09/25/2015" +} +-------------------------------------------------- + +The resulting mapping will be: + +//// +[source,console] +---- +GET my-index-000001/_mapping +---- +//TEST[continued] +//// + +[source,console-result] +-------------------------------------------------- +{ + "my-index-000001": { + "mappings": { + "dynamic_date_formats": [ + "yyyy/MM||MM/dd/yyyy" + ], + "properties": { + "create_date": { + "type": "date", + "format": "yyyy/MM||MM/dd/yyyy" + } + } + } + } +} +-------------------------------------------------- +==== [[numeric-detection]] ==== Numeric detection From 7caa2427ef1acbfbcc75b00a5b4fe2c7f34ba57e Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Thu, 11 Aug 2022 11:17:32 +0200 Subject: [PATCH 169/265] Autoscaling requirement for empty tier (#89266) Autoscaling would require a node-level size for an empty tier, which seems wrong and caused ESS to scale the empty tier into existence. Fixed to only report a node level size if the tier must exist. 
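Put differently, the decider should only translate the largest shard into a per-node storage requirement when the tier needs any storage at all; for an empty tier both the total and the node-level requirement stay at zero. A minimal stand-alone sketch of that guard, with simplified names that are not the actual decider code:

```
// Hedged illustration of the fix: only ask for a non-zero node size when the tier holds data.
static long minimumNodeSizeBytes(long requiredTotalStorageBytes, long largestShardBytes, long nodeDiskOverheadBytes) {
    if (requiredTotalStorageBytes <= 0L) {
        return 0L; // empty tier: no node needs to exist for it
    }
    return largestShardBytes + nodeDiskOverheadBytes;
}
```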
--- .../autoscaling/storage/ReactiveStorageIT.java | 15 +++++++++++++++ .../storage/ReactiveStorageDeciderService.java | 8 +++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index 709262918b18c..32c2e6d523708 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -136,6 +136,7 @@ private void testScaleFromEmptyWarm(boolean allocatable) throws Exception { refresh(); } assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), equalTo(0L)); + assertThat(capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), equalTo(0L)); assertAcked( client().admin() @@ -150,6 +151,10 @@ private void testScaleFromEmptyWarm(boolean allocatable) throws Exception { } assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), Matchers.greaterThan(0L)); + assertThat( + capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), + Matchers.greaterThan(ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) + ); } @@ -197,7 +202,9 @@ public void testScaleFromEmptyLegacy() { refresh(indexName); assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), equalTo(0L)); + assertThat(capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), equalTo(0L)); assertThat(capacity().results().get("cold").requiredCapacity().total().storage().getBytes(), equalTo(0L)); + assertThat(capacity().results().get("cold").requiredCapacity().node().storage().getBytes(), equalTo(0L)); assertAcked( client().admin() @@ -211,8 +218,16 @@ public void testScaleFromEmptyLegacy() { ); assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), Matchers.greaterThan(0L)); + assertThat( + capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), + Matchers.greaterThan(ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) + ); // this is not desirable, but one of the caveats of not using data tiers in the ILM policy. 
assertThat(capacity().results().get("cold").requiredCapacity().total().storage().getBytes(), Matchers.greaterThan(0L)); + assertThat( + capacity().results().get("cold").requiredCapacity().node().storage().getBytes(), + Matchers.greaterThan(ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) + ); } public void testScaleWhileShrinking() throws Exception { diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 8aca4c652cca6..d9777d0e9a3c0 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -143,14 +143,16 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider long unassignedBytes = unassignedBytesUnassignedShards.sizeInBytes(); long maxShardSize = allocationState.maxShardSize(); long maxNodeLockedSize = allocationState.maxNodeLockedSize(); - long minimumNodeSize = nodeSizeForDataBelowLowWatermark(Math.max(maxShardSize, maxNodeLockedSize), diskThresholdSettings) - + NODE_DISK_OVERHEAD; assert assignedBytes >= 0; assert unassignedBytes >= 0; assert maxShardSize >= 0; String message = message(unassignedBytes, assignedBytes); + long requiredTotalStorage = autoscalingCapacity.total().storage().getBytes() + unassignedBytes + assignedBytes; + long minimumNodeSize = requiredTotalStorage > 0L + ? nodeSizeForDataBelowLowWatermark(Math.max(maxShardSize, maxNodeLockedSize), diskThresholdSettings) + NODE_DISK_OVERHEAD + : 0L; AutoscalingCapacity requiredCapacity = AutoscalingCapacity.builder() - .total(autoscalingCapacity.total().storage().getBytes() + unassignedBytes + assignedBytes, null, null) + .total(requiredTotalStorage, null, null) .node(minimumNodeSize, null, null) .build(); return new AutoscalingDeciderResult( From 12a1290f480d14b40493362452d46915fc3731ec Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Thu, 11 Aug 2022 11:23:30 +0200 Subject: [PATCH 170/265] Mute FrozenExistenceDeciderIT.testZeroToOne (#89271) Related to #89082 --- .../xpack/autoscaling/existence/FrozenExistenceDeciderIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java index 28ab24c2c9e76..409ea7bef260a 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java @@ -73,6 +73,7 @@ protected Collection> nodePlugins() { return List.of(LocalStateAutoscalingAndSearchableSnapshotsAndIndexLifecycle.class); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89082") public void testZeroToOne() throws Exception { internalCluster().startMasterOnlyNode(); setupRepoAndPolicy(); From 5cbf4fbc68378cd4acdc1f616e2f0b3b78a758ae Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Thu, 11 Aug 2022 14:30:16 +0300 Subject: [PATCH 171/265] [ML] Extract timing of autoscaling into its own class (#89253) 
Extracts the time keeping necessary for the ML autoscaling decider into its own class. --- .../MlAutoscalingDeciderService.java | 48 +++------- .../xpack/ml/autoscaling/ScaleTimer.java | 71 +++++++++++++++ .../xpack/ml/autoscaling/ScaleTimerTests.java | 89 +++++++++++++++++++ 3 files changed, 172 insertions(+), 36 deletions(-) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimer.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimerTests.java diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index fb9d352d2504a..8c3b3df423c85 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -66,9 +66,7 @@ public class MlAutoscalingDeciderService implements AutoscalingDeciderService, LocalNodeMasterListener { private static final Logger logger = LogManager.getLogger(MlAutoscalingDeciderService.class); - private static final Duration DEFAULT_MEMORY_REFRESH_RATE = Duration.ofMinutes(15); private static final String MEMORY_STALE = "unable to make scaling decision as job memory requirements are stale"; - private static final long NO_SCALE_DOWN_POSSIBLE = -1L; // If ensureScaleDown changes the calculation by more than this much, log the error private static final long ACCEPTABLE_DIFFERENCE = ByteSizeValue.ofMb(1).getBytes(); @@ -80,15 +78,13 @@ public class MlAutoscalingDeciderService implements AutoscalingDeciderService, L private final NodeLoadDetector nodeLoadDetector; private final MlMemoryTracker mlMemoryTracker; private final NodeAvailabilityZoneMapper nodeAvailabilityZoneMapper; - private final LongSupplier timeSupplier; + private final ScaleTimer scaleTimer; private volatile boolean isMaster; private volatile int maxMachineMemoryPercent; private volatile int maxOpenJobs; private volatile boolean useAuto; private volatile long mlNativeMemoryForLargestMlNode; - private volatile long lastTimeToScale; - private volatile long scaleDownDetected; public MlAutoscalingDeciderService( MlMemoryTracker memoryTracker, @@ -113,8 +109,7 @@ public MlAutoscalingDeciderService( this.maxOpenJobs = MAX_OPEN_JOBS_PER_NODE.get(settings); this.useAuto = MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT.get(settings); setMaxMlNodeSize(MachineLearning.MAX_ML_NODE_SIZE.get(settings)); - this.timeSupplier = timeSupplier; - this.scaleDownDetected = NO_SCALE_DOWN_POSSIBLE; + this.scaleTimer = new ScaleTimer(timeSupplier); clusterService.getClusterSettings() .addSettingsUpdateConsumer(MachineLearning.MAX_MACHINE_MEMORY_PERCENT, this::setMaxMachineMemoryPercent); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_JOBS_PER_NODE, this::setMaxOpenJobs); @@ -287,14 +282,6 @@ public void onMaster() { isMaster = true; } - private void resetScaleDownCoolDown() { - this.scaleDownDetected = NO_SCALE_DOWN_POSSIBLE; - } - - private boolean newScaleDownCheck() { - return scaleDownDetected == NO_SCALE_DOWN_POSSIBLE; - } - NativeMemoryCapacity currentScale(final List machineLearningNodes) { return NativeMemoryCapacity.currentScale(machineLearningNodes, maxMachineMemoryPercent, useAuto); } @@ -309,11 +296,9 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider if 
(isMaster == false) { throw new IllegalArgumentException("request for scaling information is only allowed on the master node"); } - long previousTimeStamp = lastTimeToScale; - lastTimeToScale = timeSupplier.getAsLong(); - if (previousTimeStamp > 0L && lastTimeToScale > previousTimeStamp) { - mlMemoryTracker.setAutoscalingCheckInterval(Duration.ofMillis(lastTimeToScale - previousTimeStamp)); - } + scaleTimer.markScale(); + scaleTimer.lastScaleToScaleIntervalMillis() + .ifPresent(scaleInterval -> mlMemoryTracker.setAutoscalingCheckInterval(Duration.ofMillis(scaleInterval))); final ClusterState clusterState = context.state(); @@ -401,7 +386,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider reasonBuilder ); if (scaleUpDecision.isPresent()) { - resetScaleDownCoolDown(); + scaleTimer.resetScaleDownCoolDown(); return scaleUpDecision.get(); } @@ -414,7 +399,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider || mlContext.waitingAnomalyJobs.isEmpty() == false || partiallyAllocatedModels.isEmpty() == false) { // We don't want to continue to consider a scale down if there are now waiting jobs - resetScaleDownCoolDown(); + scaleTimer.resetScaleDownCoolDown(); return new AutoscalingDeciderResult( context.currentCapacity(), reasonBuilder.setSimpleReason( @@ -517,7 +502,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider } } - long msLeftToScale = msLeftToDownScale(configuration); + long msLeftToScale = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(configuration); if (msLeftToScale <= 0) { return scaleDownDecisionResult; } @@ -527,7 +512,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider "not scaling down as the current scale down delay [%s] is not satisfied." + " The last time scale down was detected [%s]. Calculated scaled down capacity [%s] ", downScaleDelay.getStringRep(), - DEFAULT_FORMATTER.format(ofEpochMilli(scaleDownDetected)), + DEFAULT_FORMATTER.format(ofEpochMilli(scaleTimer.downScaleDetectedMillis())), scaleDownDecisionResult.requiredCapacity() ) ); @@ -539,7 +524,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider "Passing currently perceived capacity as down scale delay has not been satisfied; configured delay [%s] " + "last detected scale down event [%s]. Will request scale down in approximately [%s]", downScaleDelay.getStringRep(), - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(scaleDownDetected)), + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(scaleTimer.downScaleDetectedMillis())), TimeValue.timeValueMillis(msLeftToScale).getStringRep() ) ).build() @@ -566,7 +551,7 @@ private AutoscalingDeciderResult downscaleToZero( reasonBuilder.setSimpleReason("Passing currently perceived capacity as no scaling changes are necessary").build() ); } - long msLeftToScale = msLeftToDownScale(configuration); + long msLeftToScale = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(configuration); if (msLeftToScale > 0) { return new AutoscalingDeciderResult( context.currentCapacity(), @@ -576,7 +561,7 @@ private AutoscalingDeciderResult downscaleToZero( "Passing currently perceived capacity as down scale delay has not been satisfied; configured delay [%s] " + "last detected scale down event [%s]. 
Will request scale down in approximately [%s]", DOWN_SCALE_DELAY.get(configuration).getStringRep(), - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(scaleDownDetected)), + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(scaleTimer.downScaleDetectedMillis())), TimeValue.timeValueMillis(msLeftToScale).getStringRep() ) ).build() @@ -1125,15 +1110,6 @@ Optional checkForScaleDown( return Optional.empty(); } - private long msLeftToDownScale(Settings configuration) { - final long now = timeSupplier.getAsLong(); - if (newScaleDownCheck()) { - scaleDownDetected = now; - } - TimeValue downScaleDelay = DOWN_SCALE_DELAY.get(configuration); - return downScaleDelay.millis() - (now - scaleDownDetected); - } - @Override public String name() { return NAME; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimer.java new file mode 100644 index 0000000000000..69613b8ee27cb --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimer.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.autoscaling; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; + +import java.util.Objects; +import java.util.OptionalLong; +import java.util.function.LongSupplier; + +import static org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingDeciderService.DOWN_SCALE_DELAY; + +/** + * A timer for capturing the clock time when scaling decisions + * are requested as well as for determining whether enough time + * has passed for the {@link MlAutoscalingDeciderService#DOWN_SCALE_DELAY}. 
+ */ +class ScaleTimer { + + private static final long NO_SCALE_DOWN_POSSIBLE = -1L; + + private final LongSupplier timeSupplier; + private volatile long previousScaleTimeMs; + private volatile long lastScaleTimeMs; + private volatile long scaleDownDetected; + + ScaleTimer(LongSupplier timeSupplier) { + this.timeSupplier = Objects.requireNonNull(timeSupplier); + this.scaleDownDetected = NO_SCALE_DOWN_POSSIBLE; + } + + void markScale() { + previousScaleTimeMs = lastScaleTimeMs; + lastScaleTimeMs = timeSupplier.getAsLong(); + } + + OptionalLong lastScaleToScaleIntervalMillis() { + if (previousScaleTimeMs > 0L && lastScaleTimeMs > previousScaleTimeMs) { + return OptionalLong.of(lastScaleTimeMs - previousScaleTimeMs); + } + return OptionalLong.empty(); + } + + long markDownScaleAndGetMillisLeftFromDelay(Settings configuration) { + assert lastScaleTimeMs > 0L : "marked downscale without ever marking scale"; + final long now = timeSupplier.getAsLong(); + if (newScaleDownCheck()) { + scaleDownDetected = now; + } + TimeValue downScaleDelay = DOWN_SCALE_DELAY.get(configuration); + return downScaleDelay.millis() - (now - scaleDownDetected); + } + + void resetScaleDownCoolDown() { + this.scaleDownDetected = NO_SCALE_DOWN_POSSIBLE; + } + + private boolean newScaleDownCheck() { + return scaleDownDetected == NO_SCALE_DOWN_POSSIBLE; + } + + long downScaleDetectedMillis() { + return scaleDownDetected; + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimerTests.java new file mode 100644 index 0000000000000..5266f11d2c85d --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimerTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.autoscaling; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.OptionalLong; +import java.util.function.LongSupplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class ScaleTimerTests extends ESTestCase { + + public void testLastScaleToScaleIntervalMillis_GivenNoScaleEver() { + ScaleTimer scaleTimer = new ScaleTimer(() -> System.currentTimeMillis()); + + assertThat(scaleTimer.lastScaleToScaleIntervalMillis().isEmpty(), is(true)); + } + + public void testLastScaleToScaleIntervalMillis_GivenSingleScaleEvent() { + ScaleTimer scaleTimer = new ScaleTimer(() -> System.currentTimeMillis()); + + scaleTimer.markScale(); + + assertThat(scaleTimer.lastScaleToScaleIntervalMillis().isEmpty(), is(true)); + } + + public void testLastScaleToScaleIntervalMillis_GivenMultipleScaleEvents() { + ScaleTimer scaleTimer = new ScaleTimer(new MockNowSupplier(100L, 250L, 500L)); + + scaleTimer.markScale(); + scaleTimer.markScale(); + + OptionalLong scaleInterval = scaleTimer.lastScaleToScaleIntervalMillis(); + assertThat(scaleInterval.isPresent(), is(true)); + assertThat(scaleInterval.getAsLong(), equalTo(150L)); + + scaleTimer.markScale(); + + scaleInterval = scaleTimer.lastScaleToScaleIntervalMillis(); + assertThat(scaleInterval.isPresent(), is(true)); + assertThat(scaleInterval.getAsLong(), equalTo(250L)); + } + + public void testMarkDownScaleAndGetMillisLeftFromDelay() { + ScaleTimer scaleTimer = new ScaleTimer(new MockNowSupplier(100L, 100L, 300L, 1300L, 1500L)); + scaleTimer.markScale(); + + long millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build()); + assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(100L)); + assertThat(millisLeft, equalTo(1000L)); + + millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build()); + assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(100L)); + assertThat(millisLeft, equalTo(800L)); + + millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build()); + assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(100L)); + assertThat(millisLeft, equalTo(-200L)); + + scaleTimer.resetScaleDownCoolDown(); + + millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build()); + assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(1500L)); + assertThat(millisLeft, equalTo(1000L)); + } + + private class MockNowSupplier implements LongSupplier { + + private final long[] nows; + private int count = 0; + + private MockNowSupplier(long... nows) { + this.nows = nows; + } + + @Override + public long getAsLong() { + return nows[count >= nows.length ? 
nows.length - 1 : count++]; + } + } +} From 892ad014ffcaacce928df9a40d82fb34fc051d00 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 11 Aug 2022 14:40:55 +0300 Subject: [PATCH 172/265] Refactor registering listeners out of constructors (#89265) Classes affected: - Fix LocalHealthMonitor - Refactor HealthNodeTaskExecutor - Refactor HealthMetadataService --- .../metadata/HealthMetadataService.java | 11 +++++++++- .../health/node/LocalHealthMonitor.java | 18 ++++++++++++++-- .../selection/HealthNodeTaskExecutor.java | 21 +++++++++++++------ .../java/org/elasticsearch/node/Node.java | 6 +++--- .../health/node/LocalHealthMonitorTests.java | 4 ++-- .../selection/HealthNodeExecutorTests.java | 10 ++++----- 6 files changed, 51 insertions(+), 19 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java index 73a055bb305e9..519dbf69be6ed 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java @@ -53,11 +53,20 @@ public class HealthMetadataService { // us from checking the cluster state before the cluster state is initialized private volatile boolean isMaster = false; - public HealthMetadataService(ClusterService clusterService, Settings settings) { + private HealthMetadataService(ClusterService clusterService, Settings settings) { this.clusterService = clusterService; this.settings = settings; this.clusterStateListener = this::updateOnClusterStateChange; this.enabled = ENABLED_SETTING.get(settings); + } + + public static HealthMetadataService create(ClusterService clusterService, Settings settings) { + HealthMetadataService healthMetadataService = new HealthMetadataService(clusterService, settings); + healthMetadataService.registerListeners(); + return healthMetadataService; + } + + private void registerListeners() { if (this.enabled) { this.clusterService.addListener(clusterStateListener); } diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index 8bdf4e6859d7c..ace6b6532d968 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -65,16 +65,30 @@ public class LocalHealthMonitor implements ClusterStateListener { // Keeps the latest health state that was successfully reported. 
private volatile DiskHealthInfo lastReportedDiskHealthInfo = null; - public LocalHealthMonitor(Settings settings, ClusterService clusterService, NodeService nodeService, ThreadPool threadPool) { + private LocalHealthMonitor(Settings settings, ClusterService clusterService, NodeService nodeService, ThreadPool threadPool) { this.threadPool = threadPool; this.monitorInterval = POLL_INTERVAL_SETTING.get(settings); this.enabled = HealthNodeTaskExecutor.ENABLED_SETTING.get(settings); this.clusterService = clusterService; this.diskCheck = new DiskCheck(nodeService); - clusterService.addListener(this); + } + + public static LocalHealthMonitor create( + Settings settings, + ClusterService clusterService, + NodeService nodeService, + ThreadPool threadPool + ) { + LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(settings, clusterService, nodeService, threadPool); + localHealthMonitor.registerListeners(); + return localHealthMonitor; + } + + private void registerListeners() { ClusterSettings clusterSettings = clusterService.getClusterSettings(); clusterSettings.addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, this::setMonitorInterval); clusterSettings.addSettingsUpdateConsumer(HealthNodeTaskExecutor.ENABLED_SETTING, this::setEnabled); + clusterService.addListener(this); } void setMonitorInterval(TimeValue monitorInterval) { diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java index 5cf7f4628d309..233a7c4077796 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java @@ -63,18 +63,27 @@ public final class HealthNodeTaskExecutor extends PersistentTasksExecutor> builtinTaskExecutors = HealthNode.isEnabled() ? List.of(systemIndexMigrationExecutor, healthNodeTaskExecutor) @@ -948,10 +948,10 @@ protected Node( ); HealthService healthService = createHealthService(clusterService, clusterModule, coordinationDiagnosticsService); HealthMetadataService healthMetadataService = HealthNode.isEnabled() - ? new HealthMetadataService(clusterService, settings) + ? HealthMetadataService.create(clusterService, settings) : null; LocalHealthMonitor localHealthMonitor = HealthNode.isEnabled() - ? new LocalHealthMonitor(settings, clusterService, nodeService, threadPool) + ? 
LocalHealthMonitor.create(settings, clusterService, nodeService, threadPool) : null; FileSettingsService fileSettingsService = new FileSettingsService( diff --git a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java index 24b1b58a75eb1..6bc252dff03e2 100644 --- a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java @@ -111,7 +111,7 @@ public void setUp() throws Exception { public void testUpdateNodeHealthStatus() { simulateHealthDiskSpace(); - LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(Settings.EMPTY, clusterService, nodeService, threadPool); + LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create(Settings.EMPTY, clusterService, nodeService, threadPool); assertThat(localHealthMonitor.getLastReportedDiskHealthInfo(), nullValue()); localHealthMonitor.monitorHealth(); assertThat(localHealthMonitor.getLastReportedDiskHealthInfo(), equalTo(new DiskHealthInfo(HealthStatus.GREEN, null))); @@ -121,7 +121,7 @@ public void testEnablingAndDisabling() throws Exception { simulateHealthDiskSpace(); DiskHealthInfo healthyNode = new DiskHealthInfo(HealthStatus.GREEN, null); when(clusterService.state()).thenReturn(null); - LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(Settings.EMPTY, clusterService, nodeService, threadPool); + LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create(Settings.EMPTY, clusterService, nodeService, threadPool); // Ensure that there are no issues if the cluster state hasn't been initialized yet localHealthMonitor.setEnabled(true); diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeExecutorTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeExecutorTests.java index c9f78b4b0a52a..42c50b6ce300e 100644 --- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeExecutorTests.java @@ -80,7 +80,7 @@ public void tearDown() throws Exception { } public void testTaskCreation() { - HealthNodeTaskExecutor executor = new HealthNodeTaskExecutor(clusterService, persistentTasksService, settings, clusterSettings); + HealthNodeTaskExecutor executor = HealthNodeTaskExecutor.create(clusterService, persistentTasksService, settings, clusterSettings); executor.startTask(new ClusterChangedEvent("", initialState(), ClusterState.EMPTY_STATE)); verify(persistentTasksService, times(1)).sendStartRequest( eq("health-node"), @@ -91,7 +91,7 @@ public void testTaskCreation() { } public void testSkippingTaskCreationIfItExists() { - HealthNodeTaskExecutor executor = new HealthNodeTaskExecutor(clusterService, persistentTasksService, settings, clusterSettings); + HealthNodeTaskExecutor executor = HealthNodeTaskExecutor.create(clusterService, persistentTasksService, settings, clusterSettings); executor.startTask(new ClusterChangedEvent("", stateWithHealthNodeSelectorTask(initialState()), ClusterState.EMPTY_STATE)); verify(persistentTasksService, never()).sendStartRequest( eq("health-node"), @@ -102,7 +102,7 @@ public void testSkippingTaskCreationIfItExists() { } public void testDoNothingIfAlreadyShutdown() { - HealthNodeTaskExecutor executor = new HealthNodeTaskExecutor(clusterService, persistentTasksService, settings, clusterSettings); + HealthNodeTaskExecutor 
executor = HealthNodeTaskExecutor.create(clusterService, persistentTasksService, settings, clusterSettings); HealthNode task = mock(HealthNode.class); PersistentTaskState state = mock(PersistentTaskState.class); executor.nodeOperation(task, new HealthNodeTaskParams(), state); @@ -112,7 +112,7 @@ public void testDoNothingIfAlreadyShutdown() { } public void testAbortOnShutdown() { - HealthNodeTaskExecutor executor = new HealthNodeTaskExecutor(clusterService, persistentTasksService, settings, clusterSettings); + HealthNodeTaskExecutor executor = HealthNodeTaskExecutor.create(clusterService, persistentTasksService, settings, clusterSettings); HealthNode task = mock(HealthNode.class); PersistentTaskState state = mock(PersistentTaskState.class); executor.nodeOperation(task, new HealthNodeTaskParams(), state); @@ -123,7 +123,7 @@ public void testAbortOnShutdown() { } public void testAbortOnDisable() { - HealthNodeTaskExecutor executor = new HealthNodeTaskExecutor(clusterService, persistentTasksService, settings, clusterSettings); + HealthNodeTaskExecutor executor = HealthNodeTaskExecutor.create(clusterService, persistentTasksService, settings, clusterSettings); HealthNode task = mock(HealthNode.class); PersistentTaskState state = mock(PersistentTaskState.class); executor.nodeOperation(task, new HealthNodeTaskParams(), state); From 993e467615ad53471f4e7ade3f8ff9da03fb4f16 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 11 Aug 2022 13:53:32 +0200 Subject: [PATCH 173/265] Sort ranges in geo_distance aggregation (#89154) This commit sorts ranges provided in a geo_distance aggregation, otherwise it fails to provided the right results if ranges are unordered. --- docs/changelog/89154.yaml | 6 +++++ .../aggregations/bucket/GeoDistanceIT.java | 24 ++++++++++++------- .../bucket/range/AbstractRangeBuilder.java | 5 +++- .../range/GeoDistanceAggregationBuilder.java | 2 +- 4 files changed, 26 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/89154.yaml diff --git a/docs/changelog/89154.yaml b/docs/changelog/89154.yaml new file mode 100644 index 0000000000000..2b985a677f2d4 --- /dev/null +++ b/docs/changelog/89154.yaml @@ -0,0 +1,6 @@ +pr: 89154 +summary: Sort ranges in `geo_distance` aggregation +area: Geo +type: bug +issues: + - 89147 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 4755d5b2bfece..b91c481f8e641 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; @@ -28,9 +29,11 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.Consumer; import static 
org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.geoDistance; @@ -128,15 +131,18 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSimple() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ) - .get(); + List> ranges = new ArrayList<>(); + ranges.add(b -> b.addUnboundedTo(500)); + ranges.add(b -> b.addRange(500, 1000)); + ranges.add(b -> b.addUnboundedFrom(1000)); + // add ranges in any order + Collections.shuffle(ranges, random()); + GeoDistanceAggregationBuilder builder = geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS); + for (Consumer range : ranges) { + range.accept(builder); + } + SearchResponse response = client().prepareSearch("idx").addAggregation(builder).get(); assertSearchResponse(response); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java index ccaf8f8f1210d..87f726b47579e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java @@ -83,7 +83,10 @@ protected Range[] processRanges(Function rangeProcessor) { return ranges; } - private static void sortRanges(final Range[] ranges) { + /** + * Sort the provided ranges in place. + */ + static void sortRanges(final Range[] ranges) { new InPlaceMergeSorter() { @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index e3fdec316efd6..4472c3f0628c6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -456,7 +456,7 @@ protected ValuesSourceAggregatorFactory innerBuild( if (ranges.length == 0) { throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation"); } - + AbstractRangeBuilder.sortRanges(ranges); return new GeoDistanceRangeAggregatorFactory( name, config, From 88a0f6f9e54354f0e56da8bfb68a2af47176a54c Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 11 Aug 2022 14:11:34 +0200 Subject: [PATCH 174/265] Check for polygon self-intersections in ShapeFieldMapper (#89210) This commit set the self-intersection flag to true so we can give better error message to the users. 
--- .../spatial/index/mapper/CartesianShapeIndexer.java | 2 +- .../mapper/GeoShapeWithDocValuesFieldMapperTests.java | 9 +++++++++ .../spatial/index/mapper/ShapeFieldMapperTests.java | 9 +++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeIndexer.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeIndexer.java index 52af533184959..b8e665c0c768a 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeIndexer.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeIndexer.java @@ -111,7 +111,7 @@ public Void visit(Point point) { @Override public Void visit(Polygon polygon) { - addFields(XYShape.createIndexableFields(name, ShapeUtils.toLuceneXYPolygon(polygon))); + addFields(XYShape.createIndexableFields(name, ShapeUtils.toLuceneXYPolygon(polygon), true)); return null; } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index 7085da9b8b1b7..07a42660003a8 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -404,6 +404,15 @@ public void testMultiFieldsDeprecationWarning() throws Exception { assertWarnings("Adding multifields to [geo_shape] mappers has no effect and will be forbidden in future"); } + public void testSelfIntersectPolygon() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> mapper.parse(source(b -> b.field("field", "POLYGON((0 0, 1 1, 0 1, 1 0, 0 0))"))) + ); + assertThat(ex.getCause().getMessage(), containsString("Polygon self-intersection at lat=0.5 lon=0.5")); + } + public String toXContentString(GeoShapeWithDocValuesFieldMapper mapper, boolean includeDefaults) { if (includeDefaults) { ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true")); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java index d7223d88789b2..c4098b852b74a 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java @@ -360,6 +360,15 @@ public void testMultiFieldsDeprecationWarning() throws Exception { assertWarnings("Adding multifields to [shape] mappers has no effect and will be forbidden in future"); } + public void testSelfIntersectPolygon() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> mapper.parse(source(b -> b.field(FIELD_NAME, "POLYGON((0 0, 1 1, 0 1, 1 0, 0 0))"))) + ); + assertThat(ex.getCause().getMessage(), containsString("Polygon 
self-intersection at lat=0.5 lon=0.5")); + } + public String toXContentString(ShapeFieldMapper mapper, boolean includeDefaults) { if (includeDefaults) { ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true")); From e4a19d4c03bad27f24e716830b276cf4a2cf62a0 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 11 Aug 2022 09:18:50 -0500 Subject: [PATCH 175/265] Fixing remote master stability request when there has never been an elected master (#89214) This fixes an edge case in the master stability polling code from #89014. If there has not been an elected master node for the entire life of a non-master-eligible node, then `clusterChanged()` will have never been called on that node, so `beginPollingRemoteMasterStabilityDiagnostic()` will have never been called. And even though the node might know of some master-eligible nodes, it will never have requested diagnostic information from them. This PR adds a call to `beginPollingRemoteMasterStabilityDiagnostic` in `CoordinationDiagnosticsService`'s constructor to cover this edge case. In almost all cases, `clusterChanged()` will be called within 10 seconds so the polling will never occur. However if there is no master node then there will be no cluster changed events, and `clusterChanged()` will not be called, and the results of the polling will likely be useful. This PR has several possibly controversial pieces of code. I'm listing them here with some discussion: 1. Because there is now a call to `beginPollingRemoteMasterStabilityDiagnostic()` in the ~~constructor~~ object's initialization code, `beginPollingRemoteMasterStabilityDiagnostic()` is no longer solely called from the cluster change thread. However, this call happens before the object is registered as a cluster service listener, so there is no new thread safety concern. 2. Because there is now a call to `beginPollingRemoteMasterStabilityDiagnostic()` in the ~~constructor~~ object's initialization code, we have to explicitly switch to the system context so that the various transport requests work in secure mode. 3. ~~When we're in the constructor, we don't actually know yet whether we're a master eligible node or not, so we kick off `beginPollingRemoteMasterStabilityDiagnostic()` for all node types, including master-eligible nodes. This will be fairly harmless for master eligible nodes though. In the worst case, they'll retrieve some information that they'll never use. This explains why `clusterChanged()` now cancels polling even if we are on a master eligible node.~~ 4. ~~It is now possible that we use `clusterService.state()` before it is ready when we're trying to get the list of master-eligible peers. In production mode this method returns null, so we can check that before using it. If assertions are enabled in the JVM, just calling that method throws an `AssertionError`. I'm currently catching that with the assumption that it is harmless because there does not seem to be a way around it (without even further complicating code).~~ 5. ~~It is now possible that we call `transportService.sendRequest()` before the transport service is ready. This happens if the server is initializing unusually slowly (i.e. it takes more than 10 seconds to complete the `Node` constructor) and if assertions are enabled. I don't see a way around this without further complicating the code, so I'm catching `AssertionError` and moving on, with the assumption that it will work 10 seconds later when it runs again. 
I'm also catching and storing `Exception`, which I think I should have been doing before anyway.~~ Note: Points 3, 4, and 5 are no longer relevant because I moved the call to `beginPollingRemoteMasterStabilityDiagnostic()` out of the constructor, and am now calling it after the transport service and cluster state have been initialized. --- .../CoordinationDiagnosticsServiceIT.java | 56 +++++++++++++------ .../CoordinationDiagnosticsService.java | 54 +++++++++++++----- .../java/org/elasticsearch/node/Node.java | 5 ++ .../elasticsearch/threadpool/ThreadPool.java | 7 +++ 4 files changed, 93 insertions(+), 29 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java index cdd84a829b44f..4cc4589d71350 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -49,6 +49,11 @@ private void setBootstrapMasterNodeIndex() { internalCluster().setBootstrapMasterNodeIndex(0); } + @Before + private void restoreDefaultInitialDelay() { + CoordinationDiagnosticsService.remoteRequestInitialDelay = new TimeValue(10, TimeUnit.SECONDS); + } + @Override protected Collection> nodePlugins() { return Collections.singletonList(MockTransportService.TestPlugin.class); @@ -90,7 +95,7 @@ public void testBlockClusterStateProcessingOnOneNode() throws Exception { diagnosticsOnBlockedNode.clusterFormationResponses = nodeToClusterFormationStateMap; diagnosticsOnBlockedNode.clusterFormationInfoTasks = cancellables; - diagnosticsOnBlockedNode.remoteRequestInitialDelay = TimeValue.ZERO; + CoordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO; diagnosticsOnBlockedNode.beginPollingClusterFormationInfo( nodesWithoutBlockedNode, nodeToClusterFormationStateMap::put, @@ -150,7 +155,7 @@ public void testBeginPollingRemoteStableMasterHealthIndicatorService() throws Ex diagnosticsOnBlockedNode.remoteCoordinationDiagnosisResult = result; diagnosticsOnBlockedNode.remoteCoordinationDiagnosisTask = cancellable; - diagnosticsOnBlockedNode.remoteRequestInitialDelay = TimeValue.ZERO; + CoordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO; diagnosticsOnBlockedNode.beginPollingRemoteMasterStabilityDiagnostic(result::set, cancellable); // while the node is blocked from processing cluster state changes it should reach out to the other 2 @@ -195,7 +200,7 @@ public void testNoQuorumSeenFromNonMasterNodes() throws Exception { .build() ); internalCluster().getInstances(CoordinationDiagnosticsService.class) - .forEach(coordinationDiagnosticsService -> coordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO); + .forEach(coordinationDiagnosticsService -> CoordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO); ensureStableCluster(5); String firstMasterNode = internalCluster().getMasterName(); List nonActiveMasterNodes = masterNodes.stream().filter(nodeName -> firstMasterNode.equals(nodeName) == false).toList(); @@ -230,17 +235,16 @@ public void testNoQuorumSeenFromNonMasterNodes() throws Exception { public void testNoMasterElected() throws Exception { /* - * This test starts up a 3-node cluster where all nodes are master eligible. It then shuts down two of the nodes and restarts one - * of them. 
We then assert that diagnoseMasterStability returns a red status because a quorum can't be formed. This is an edge - * case because since there is no elected master, clusterChanged() is never called (which is what usually kicks off the polling - * that drives the quorum check). + * This test starts up a 4-node cluster where 3 nodes are master eligible. It then shuts down two of the master eligible nodes and + * restarts one of the master eligible nodes and the data-only node. We then assert that diagnoseMasterStability returns a red + * status because a quorum can't be formed on both of those nodes. This is an edge case because since there is no elected master, + * clusterChanged() is never called (which is what usually kicks off the polling that drives the quorum check). */ - final List masterNodeNames = internalCluster().startMasterOnlyNodes( - 3, - Settings.builder().put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() - ); - ensureStableCluster(3); - String randomMasterNodeName = internalCluster().getRandomNodeName(); + Settings settings = Settings.builder().put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build(); + final List masterNodeNames = internalCluster().startMasterOnlyNodes(3, settings); + final String dataNodeName = internalCluster().startDataOnlyNode(settings); + ensureStableCluster(4); + String randomMasterNodeName = randomFrom(masterNodeNames); masterNodeNames.stream().filter(nodeName -> nodeName.equals(randomMasterNodeName) == false).forEach(nodeName -> { try { internalCluster().stopNode(nodeName); @@ -248,25 +252,45 @@ public void testNoMasterElected() throws Exception { throw new RuntimeException(e); } }); - internalCluster().restartNode(randomMasterNodeName, new InternalTestCluster.RestartCallback() { + InternalTestCluster.RestartCallback nonValidatingRestartCallback = new InternalTestCluster.RestartCallback() { public boolean validateClusterForming() { return false; } - }); + }; + CoordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO; + internalCluster().restartNode(randomMasterNodeName, nonValidatingRestartCallback); + internalCluster().restartNode(dataNodeName, nonValidatingRestartCallback); try { CoordinationDiagnosticsService diagnosticsOnMasterEligibleNode = internalCluster().getInstance( CoordinationDiagnosticsService.class, randomMasterNodeName ); - diagnosticsOnMasterEligibleNode.remoteRequestInitialDelay = TimeValue.ZERO; CoordinationDiagnosticsService.CoordinationDiagnosticsResult result = diagnosticsOnMasterEligibleNode.diagnoseMasterStability( true ); assertThat(result.status(), equalTo(CoordinationDiagnosticsService.CoordinationDiagnosticsStatus.RED)); assertThat(result.summary(), containsString("the master eligible nodes are unable to form a quorum")); + CoordinationDiagnosticsService diagnosticsOnDataNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + dataNodeName + ); + + assertBusy(() -> { + assertNotNull(diagnosticsOnDataNode.remoteCoordinationDiagnosisResult.get()); + assertNotNull(diagnosticsOnDataNode.remoteCoordinationDiagnosisResult.get().result()); + assertThat( + diagnosticsOnDataNode.remoteCoordinationDiagnosisResult.get().result().summary(), + containsString("the master eligible nodes are unable to form a quorum") + ); + assertThat( + diagnosticsOnDataNode.remoteCoordinationDiagnosisResult.get().result().status(), + equalTo(CoordinationDiagnosticsService.CoordinationDiagnosticsStatus.RED) + ); + }); } finally { internalCluster().stopNode(randomMasterNodeName); // This is needed 
for the test to clean itself up happily + internalCluster().stopNode(dataNodeName); // This is needed for the test to clean itself up happily } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index 77e585a0178b1..b474cb67772a5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -122,7 +123,7 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { * user-configurable, but is non-final so that integration tests don't have to waste 10 seconds. */ // Non-private for testing - TimeValue remoteRequestInitialDelay = new TimeValue(10, TimeUnit.SECONDS); + static TimeValue remoteRequestInitialDelay = new TimeValue(10, TimeUnit.SECONDS); private static final Logger logger = LogManager.getLogger(CoordinationDiagnosticsService.class); @@ -170,6 +171,28 @@ public CoordinationDiagnosticsService( this.nodeHasMasterLookupTimeframe = NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.get(clusterService.getSettings()); this.unacceptableNullTransitions = NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.get(clusterService.getSettings()); this.unacceptableIdentityChanges = IDENTITY_CHANGES_THRESHOLD_SETTING.get(clusterService.getSettings()); + } + + /** + * This method completes the initialization of the CoordinationDiagnosticsService. It kicks off polling for remote master stability + * results on non-master-eligible nodes, and registers the service as a cluster service listener on all nodes. + */ + public void start() { + /* + * This is called here to cover an edge case -- when there are master-eligible nodes in the cluster but none of them has been + * elected master. In the most common case this node will receive a ClusterChangedEvent that results in this polling being + * cancelled almost immediately. If that does not happen, then we do in fact need to be polling. Unfortunately there is no way to + * tell at this point whether this node is master-eligible or not, so we kick this off regardless. On master-eligible nodes the + * results will always be harmlessly ignored. Note that beginPollingRemoteMasterStabilityDiagnostic results in several internal + * transport actions being called, so it must run in the system context. 
+ */ + if (clusterService.localNode().isMasterNode() == false) { + final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.markAsSystemContext(); + beginPollingRemoteMasterStabilityDiagnostic(); + } + } clusterService.addListener(this); } @@ -669,6 +692,7 @@ public void clusterChanged(ClusterChangedEvent event) { */ void beginPollingClusterFormationInfo() { assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); + assert ThreadPool.assertInSystemContext(transportService.getThreadPool()); cancelPollingClusterFormationInfo(); ConcurrentMap responses = new ConcurrentHashMap<>(); Map cancellables = new ConcurrentHashMap<>(); @@ -819,7 +843,7 @@ private Scheduler.Cancellable fetchClusterFormationInfo( } void beginPollingRemoteMasterStabilityDiagnostic() { - assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); + assert ThreadPool.assertInSystemContext(transportService.getThreadPool()); AtomicReference cancellableReference = new AtomicReference<>(); AtomicReference resultReference = new AtomicReference<>(); remoteCoordinationDiagnosisTask = cancellableReference; @@ -967,16 +991,20 @@ private Scheduler.Cancellable sendTransportRequest logger.trace("Opened connection to {}, making transport request", masterEligibleNode); // If we don't get a response in 10 seconds that is a failure worth capturing on its own: final TimeValue transportTimeout = TimeValue.timeValueSeconds(10); - transportService.sendRequest( - masterEligibleNode, - transportActionType.name(), - transportActionRequest, - TransportRequestOptions.timeout(transportTimeout), - new ActionListenerResponseHandler<>( - ActionListener.runBefore(fetchRemoteResultListener, () -> Releasables.close(releasable)), - transportActionType.getResponseReader() - ) - ); + try { + transportService.sendRequest( + masterEligibleNode, + transportActionType.name(), + transportActionRequest, + TransportRequestOptions.timeout(transportTimeout), + new ActionListenerResponseHandler<>( + ActionListener.runBefore(fetchRemoteResultListener, () -> Releasables.close(releasable)), + transportActionType.getResponseReader() + ) + ); + } catch (Exception e) { + responseConsumer.accept(responseTransformationFunction.apply(null, e)); + } } }, e -> { logger.warn("Exception connecting to master masterEligibleNode", e); @@ -988,7 +1016,7 @@ private Scheduler.Cancellable sendTransportRequest logger.trace("Received remote response from {} in {}", masterEligibleNode, TimeValue.timeValueNanos(endTime - startTime)); responseConsumer.accept(responseTransformationFunction.apply(response, null)); }, e -> { - logger.warn("Exception in remote request to master masterEligibleNode", e); + logger.warn("Exception in remote request to master" + masterEligibleNode, e); responseConsumer.accept(responseTransformationFunction.apply(null, e)); }); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 059f4062b53f6..ff3d28bdce663 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -1323,6 +1323,11 @@ public Node start() throws NodeValidationException { assert clusterService.localNode().equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided"; transportService.acceptIncomingRequests(); + /* + * 
CoordinationDiagnosticsService expects to be able to send transport requests and use the cluster state, so it is important to + * start it here after the clusterService and transportService have been started. + */ + injector.getInstance(CoordinationDiagnosticsService.class).start(); coordinator.startInitialJoin(); final TimeValue initialStateTimeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings()); configureNodeAndClusterIdStateListener(clusterService); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 6b9485ca2717c..87ac04cc8989f 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -917,6 +917,13 @@ public static boolean assertCurrentThreadPool(String... permittedThreadPoolNames return true; } + public static boolean assertInSystemContext(ThreadPool threadPool) { + final var threadName = Thread.currentThread().getName(); + assert threadName.startsWith("TEST-") || threadName.startsWith("LuceneTestCase") || threadPool.getThreadContext().isSystemContext() + : threadName + " is not running in the system context nor a test thread"; + return true; + } + public static boolean assertCurrentMethodIsNotCalledRecursively() { final StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); assert stackTraceElements.length >= 3 : stackTraceElements.length; From 6c12fe0d67a51872347c168cb9198a6723e6f529 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Thu, 11 Aug 2022 16:50:57 +0200 Subject: [PATCH 176/265] [Transform] add an unattended mode setting to transform (#89212) add an unattended mode setting. This will change how transform reacts on certain error types and lets transform run without failing. 
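For illustration only, a minimal sketch of how the new setting is wired up through the settings builder added in this change (the wrapper class below is hypothetical; the builder calls mirror the test code in this patch):

    import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig;

    public class UnattendedSettingsExample {
        public static void main(String[] args) {
            // Enable the new unattended flag: the transform keeps retrying instead of
            // failing on errors that would otherwise be classified as irrecoverable.
            SettingsConfig unattended = new SettingsConfig.Builder().setUnattended(true).build();

            // An explicit retry limit combined with unattended mode is reported as invalid
            // by SettingsConfig#validate, because unattended transforms retry indefinitely.
            SettingsConfig invalid = new SettingsConfig.Builder().setUnattended(true).setNumFailureRetries(5).build();
            System.out.println(invalid.validate(null));
        }
    }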
--- docs/changelog/89212.yaml | 5 + .../xpack/core/transform/TransformField.java | 2 + .../transform/transforms/SettingsConfig.java | 83 +++++- .../transform/transforms/TransformConfig.java | 9 +- .../transforms/SettingsConfigTests.java | 82 +++++- .../TransformConfigUpdateTests.java | 18 +- .../transform/integration/TransformIT.java | 4 +- .../transforms/ClientTransformIndexer.java | 31 +- .../transforms/TransformContext.java | 22 ++ .../transforms/TransformFailureHandler.java | 275 ++++++++++++++++++ .../transforms/TransformIndexer.java | 147 +--------- .../TransformFailureHandlerTests.java | 128 ++++++++ .../TransformIndexerFailureHandlingTests.java | 115 +++----- ...IndexerFailureOnStatePersistenceTests.java | 8 +- .../TransformIndexerStateTests.java | 2 +- .../schema/transform_config.schema.json | 7 + 16 files changed, 663 insertions(+), 275 deletions(-) create mode 100644 docs/changelog/89212.yaml create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java create mode 100644 x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java diff --git a/docs/changelog/89212.yaml b/docs/changelog/89212.yaml new file mode 100644 index 0000000000000..123023d0dd704 --- /dev/null +++ b/docs/changelog/89212.yaml @@ -0,0 +1,5 @@ +pr: 89212 +summary: Add an unattended mode setting to transform +area: Transform +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java index 24b147c9e85ad..f100bac64525c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java @@ -42,6 +42,8 @@ public final class TransformField { public static final ParseField USE_PIT = new ParseField("use_point_in_time"); public static final ParseField DEDUCE_MAPPINGS = new ParseField("deduce_mappings"); public static final ParseField NUM_FAILURE_RETRIES = new ParseField("num_failure_retries"); + public static final ParseField UNATTENDED = new ParseField("unattended"); + public static final ParseField FIELD = new ParseField("field"); public static final ParseField SYNC = new ParseField("sync"); public static final ParseField TIME = new ParseField("time"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java index 3c715f136be7a..d7479470c3913 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java @@ -43,6 +43,7 @@ public class SettingsConfig implements Writeable, ToXContentObject { private static final int DEFAULT_USE_PIT = -1; private static final int DEFAULT_DEDUCE_MAPPINGS = -1; private static final int DEFAULT_NUM_FAILURE_RETRIES = -2; + private static final int DEFAULT_UNATTENDED = -1; private static ConstructingObjectParser createParser(boolean lenient) { ConstructingObjectParser parser = new ConstructingObjectParser<>( @@ -55,7 +56,8 @@ private static ConstructingObjectParser createParser(boole (Integer) args[3], (Integer) args[4], (Integer) args[5], - 
(Integer) args[6] + (Integer) args[6], + (Integer) args[7] ) ); parser.declareIntOrNull(optionalConstructorArg(), DEFAULT_MAX_PAGE_SEARCH_SIZE, TransformField.MAX_PAGE_SEARCH_SIZE); @@ -89,6 +91,13 @@ private static ConstructingObjectParser createParser(boole ValueType.BOOLEAN_OR_NULL ); parser.declareIntOrNull(optionalConstructorArg(), DEFAULT_NUM_FAILURE_RETRIES, TransformField.NUM_FAILURE_RETRIES); + // this boolean requires 4 possible values: true, false, not_specified, default, therefore using a custom parser + parser.declareField( + optionalConstructorArg(), + p -> p.currentToken() == XContentParser.Token.VALUE_NULL ? DEFAULT_UNATTENDED : p.booleanValue() ? 1 : 0, + TransformField.UNATTENDED, + ValueType.BOOLEAN_OR_NULL + ); return parser; } @@ -99,9 +108,10 @@ private static ConstructingObjectParser createParser(boole private final Integer usePit; private final Integer deduceMappings; private final Integer numFailureRetries; + private final Integer unattended; public SettingsConfig() { - this(null, null, (Integer) null, (Integer) null, (Integer) null, (Integer) null, (Integer) null); + this(null, null, (Integer) null, (Integer) null, (Integer) null, (Integer) null, (Integer) null, (Integer) null); } public SettingsConfig( @@ -111,7 +121,8 @@ public SettingsConfig( Boolean alignCheckpoints, Boolean usePit, Boolean deduceMappings, - Integer numFailureRetries + Integer numFailureRetries, + Boolean unattended ) { this( maxPageSearchSize, @@ -120,7 +131,8 @@ public SettingsConfig( alignCheckpoints == null ? null : alignCheckpoints ? 1 : 0, usePit == null ? null : usePit ? 1 : 0, deduceMappings == null ? null : deduceMappings ? 1 : 0, - numFailureRetries + numFailureRetries, + unattended == null ? null : unattended ? 1 : 0 ); } @@ -131,7 +143,8 @@ public SettingsConfig( Integer alignCheckpoints, Integer usePit, Integer deduceMappings, - Integer numFailureRetries + Integer numFailureRetries, + Integer unattended ) { this.maxPageSearchSize = maxPageSearchSize; this.docsPerSecond = docsPerSecond; @@ -140,6 +153,7 @@ public SettingsConfig( this.usePit = usePit; this.deduceMappings = deduceMappings; this.numFailureRetries = numFailureRetries; + this.unattended = unattended; } public SettingsConfig(final StreamInput in) throws IOException { @@ -170,6 +184,11 @@ public SettingsConfig(final StreamInput in) throws IOException { } else { numFailureRetries = null; } + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + unattended = in.readOptionalInt(); + } else { + unattended = DEFAULT_UNATTENDED; + } } public Integer getMaxPageSearchSize() { @@ -220,6 +239,14 @@ public Integer getNumFailureRetriesForUpdate() { return numFailureRetries; } + public Boolean getUnattended() { + return unattended != null ? (unattended == DEFAULT_UNATTENDED) ? 
null : (unattended > 0) : null; + } + + public Integer getUnattendedForUpdate() { + return unattended; + } + public ActionRequestValidationException validate(ActionRequestValidationException validationException) { if (maxPageSearchSize != null && (maxPageSearchSize < 10 || maxPageSearchSize > MultiBucketConsumerService.DEFAULT_MAX_BUCKETS)) { validationException = addValidationError( @@ -239,6 +266,17 @@ public ActionRequestValidationException validate(ActionRequestValidationExceptio validationException ); } + + // disallow setting unattended to true with explicit num failure retries + if (unattended != null && unattended == 1 && numFailureRetries != null && numFailureRetries > 0) { + validationException = addValidationError( + "settings.num_failure_retries [" + + numFailureRetries + + "] can not be set in unattended mode, unattended retries indefinitely", + validationException + ); + } + return validationException; } @@ -263,6 +301,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_8_4_0)) { out.writeOptionalInt(numFailureRetries); } + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeOptionalInt(unattended); + } } @Override @@ -290,6 +331,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (numFailureRetries != null && (numFailureRetries.equals(DEFAULT_NUM_FAILURE_RETRIES) == false)) { builder.field(TransformField.NUM_FAILURE_RETRIES.getPreferredName(), numFailureRetries); } + if (unattended != null && (unattended.equals(DEFAULT_UNATTENDED) == false)) { + builder.field(TransformField.UNATTENDED.getPreferredName(), unattended > 0 ? true : false); + } builder.endObject(); return builder; } @@ -310,7 +354,8 @@ public boolean equals(Object other) { && Objects.equals(alignCheckpoints, that.alignCheckpoints) && Objects.equals(usePit, that.usePit) && Objects.equals(deduceMappings, that.deduceMappings) - && Objects.equals(numFailureRetries, that.numFailureRetries); + && Objects.equals(numFailureRetries, that.numFailureRetries) + && Objects.equals(unattended, that.unattended); } @Override @@ -322,7 +367,8 @@ public int hashCode() { alignCheckpoints, usePit, deduceMappings, - numFailureRetries + numFailureRetries, + unattended ); } @@ -343,6 +389,7 @@ public static class Builder { private Integer usePit; private Integer deduceMappings; private Integer numFailureRetries; + private Integer unattended; /** * Default builder @@ -362,6 +409,7 @@ public Builder(SettingsConfig base) { this.usePit = base.usePit; this.deduceMappings = base.deduceMappings; this.numFailureRetries = base.numFailureRetries; + this.unattended = base.unattended; } /** @@ -455,6 +503,21 @@ public Builder setNumFailureRetries(Integer numFailureRetries) { return this; } + /** + * Whether to run the transform in unattended mode. + * In unattended mode the transform does not immediately fail for errors that are classified + * as irrecoverable. + * + * An explicit `null` resets to default. + * + * @param unattended true if this is a unattended transform. + * @return the {@link Builder} with usePit set. + */ + public Builder setUnattended(Boolean unattended) { + this.unattended = unattended == null ? DEFAULT_UNATTENDED : unattended ? 1 : 0; + return this; + } + /** * Update settings according to given settings config. * @@ -495,6 +558,9 @@ public Builder update(SettingsConfig update) { ? 
null : update.getNumFailureRetriesForUpdate(); } + if (update.getUnattendedForUpdate() != null) { + this.unattended = update.getUnattendedForUpdate().equals(DEFAULT_UNATTENDED) ? null : update.getUnattendedForUpdate(); + } return this; } @@ -507,7 +573,8 @@ public SettingsConfig build() { alignCheckpoints, usePit, deduceMappings, - numFailureRetries + numFailureRetries, + unattended ); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index 9ddb989d08e7c..93fba8915699f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -629,7 +629,8 @@ private static TransformConfig applyRewriteForUpdate(Builder builder) { builder.getSettings().getAlignCheckpoints(), builder.getSettings().getUsePit(), builder.getSettings().getDeduceMappings(), - builder.getSettings().getNumFailureRetries() + builder.getSettings().getNumFailureRetries(), + builder.getSettings().getUnattended() ) ); } @@ -644,7 +645,8 @@ private static TransformConfig applyRewriteForUpdate(Builder builder) { builder.getSettings().getAlignCheckpoints(), builder.getSettings().getUsePit(), builder.getSettings().getDeduceMappings(), - builder.getSettings().getNumFailureRetries() + builder.getSettings().getNumFailureRetries(), + builder.getSettings().getUnattended() ) ); } @@ -659,7 +661,8 @@ private static TransformConfig applyRewriteForUpdate(Builder builder) { false, builder.getSettings().getUsePit(), builder.getSettings().getDeduceMappings(), - builder.getSettings().getNumFailureRetries() + builder.getSettings().getNumFailureRetries(), + builder.getSettings().getUnattended() ) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java index e394fea57db3e..18594bab4d717 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfigTests.java @@ -33,6 +33,8 @@ public class SettingsConfigTests extends AbstractSerializingTransformTestCase xContentToMap(ToXContent xcontent) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); xcontent.toXContent(builder, XContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java index f379b4d93b068..92660ed1a3fff 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java @@ -115,7 +115,15 @@ public void testApply() { TimeValue frequency = TimeValue.timeValueSeconds(10); SyncConfig syncConfig = new TimeSyncConfig("time_field", TimeValue.timeValueSeconds(30)); String newDescription = "new description"; - SettingsConfig settings = new SettingsConfig(4_000, 4_000.400F, true, true, true, true, 10); + SettingsConfig 
settings = new SettingsConfig.Builder().setMaxPageSearchSize(4_000) + .setRequestsPerSecond(4_000.400F) + .setDatesAsEpochMillis(true) + .setAlignCheckpoints(true) + .setUsePit(true) + .setDeduceMappings(true) + .setNumFailureRetries(10) + .setUnattended(true) + .build(); Map newMetadata = randomMetadata(); RetentionPolicyConfig retentionPolicyConfig = new TimeRetentionPolicyConfig("time_field", new TimeValue(60_000)); update = new TransformConfigUpdate( @@ -204,7 +212,7 @@ public void testApplySettings() { null, null, null, - new SettingsConfig(4_000, null, (Boolean) null, null, null, null, null), + new SettingsConfig.Builder().setMaxPageSearchSize(4_000).build(), null, null ); @@ -223,7 +231,7 @@ public void testApplySettings() { null, null, null, - new SettingsConfig(null, 43.244F, (Boolean) null, null, null, null, null), + new SettingsConfig.Builder().setRequestsPerSecond(43.244F).build(), null, null ); @@ -240,7 +248,7 @@ public void testApplySettings() { null, null, null, - new SettingsConfig(-1, null, (Boolean) null, null, null, null, null), + new SettingsConfig.Builder().setMaxPageSearchSize(null).build(), null, null ); @@ -256,7 +264,7 @@ public void testApplySettings() { null, null, null, - new SettingsConfig(-1, -1F, (Boolean) null, null, null, null, null), + new SettingsConfig.Builder().setMaxPageSearchSize(null).setRequestsPerSecond(null).build(), null, null ); diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java index 94eefd4fb5e78..e2937a9c1de75 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java @@ -137,7 +137,7 @@ public void testContinuousTransformCrud() throws Exception { indexName ).setPivotConfig(createPivotConfig(groups, aggs)) .setSyncConfig(new TimeSyncConfig("timestamp", TimeValue.timeValueSeconds(1))) - .setSettings(new SettingsConfig(null, null, null, false, null, null, null)) + .setSettings(new SettingsConfig.Builder().setAlignCheckpoints(false).build()) .build(); putTransform(transformId, Strings.toString(config), RequestOptions.DEFAULT); @@ -391,7 +391,7 @@ public void testContinuousTransformRethrottle() throws Exception { ).setPivotConfig(createPivotConfig(groups, aggs)) .setSyncConfig(new TimeSyncConfig("timestamp", TimeValue.timeValueSeconds(1))) // set requests per second and page size low enough to fail the test if update does not succeed, - .setSettings(new SettingsConfig(10, 1F, null, false, null, null, null)) + .setSettings(new SettingsConfig.Builder().setMaxPageSearchSize(20).setRequestsPerSecond(1F).setAlignCheckpoints(false).build()) .build(); putTransform(transformId, Strings.toString(config), RequestOptions.DEFAULT); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 6450e97658b53..7891f6d21d8f6 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -61,10 +61,8 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Map.Entry; -import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; @@ -78,9 +76,6 @@ class ClientTransformIndexer extends TransformIndexer { private final AtomicBoolean oldStatsCleanedUp = new AtomicBoolean(false); private final AtomicReference seqNoPrimaryTermAndIndexHolder; - - // protected for unit tests - protected final AtomicInteger statePersistenceFailures = new AtomicInteger(); private final ConcurrentHashMap namedPits = new ConcurrentHashMap<>(); private volatile long pitCheckpoint; private volatile boolean disablePit = false; @@ -292,7 +287,7 @@ protected void persistState(TransformState state, ActionListener listener) seqNoPrimaryTermAndIndex, ActionListener.wrap(r -> { updateSeqNoPrimaryTermAndIndex(seqNoPrimaryTermAndIndex, r); - statePersistenceFailures.set(0); + context.resetStatePersistenceFailureCount(); // Only do this clean up once, if it succeeded, no reason to do the query again. if (oldStatsCleanedUp.compareAndSet(false, true)) { @@ -335,7 +330,7 @@ protected void persistState(TransformState state, ActionListener listener) + statsExc.getMessage() ); - if (handleStatePersistenceFailure(statsExc) == false) { + if (failureHandler.handleStatePersistenceFailure(statsExc, getConfig().getSettings()) == false) { // get the current seqNo and primary term, however ignore the stored state transformsConfigManager.getTransformStoredDoc( transformConfig.getId(), @@ -351,33 +346,13 @@ protected void persistState(TransformState state, ActionListener listener) } else { logger.warn(() -> "[" + transformConfig.getId() + "] updating stats of transform failed.", statsExc); auditor.warning(getJobId(), "Failure updating stats of transform: " + statsExc.getMessage()); - handleStatePersistenceFailure(statsExc); + failureHandler.handleStatePersistenceFailure(statsExc, getConfig().getSettings()); } listener.onFailure(statsExc); }) ); } - private boolean handleStatePersistenceFailure(Exception statsExc) { - // we use the same setting for retries, however a separate counter, because the failure - // counter for search/index gets reset after a successful bulk index request - int numFailureRetries = Optional.ofNullable(transformConfig.getSettings().getNumFailureRetries()) - .orElse(context.getNumFailureRetries()); - - final int failureCount = statePersistenceFailures.incrementAndGet(); - - if (numFailureRetries != -1 && failureCount > numFailureRetries) { - failIndexer( - "task encountered more than " - + numFailureRetries - + " failures updating internal state; latest failure: " - + statsExc.getMessage() - ); - return true; - } - return false; - } - void updateSeqNoPrimaryTermAndIndex(SeqNoPrimaryTermAndIndex expectedValue, SeqNoPrimaryTermAndIndex newValue) { logger.debug(() -> format("[%s] Updated state document from [%s] to [%s]", transformConfig.getId(), expectedValue, newValue)); boolean updated = seqNoPrimaryTermAndIndexHolder.compareAndSet(expectedValue, newValue); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java index c16a310659896..a35899d18bc96 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformContext.java @@ -33,9 +33,11 @@ public interface Listener { private final AtomicInteger failureCount; // Keeps track of the last failure that occured, used for throttling logs and audit private final AtomicReference lastFailure = new AtomicReference<>(); + private final AtomicInteger statePersistenceFailureCount = new AtomicInteger(); private volatile Instant changesLastDetectedAt; private volatile Instant lastSearchTime; private volatile boolean shouldStopAtCheckpoint = false; + private volatile int pageSize = 0; // the checkpoint of this transform, storing the checkpoint until data indexing from source to dest is _complete_ // Note: Each indexer run creates a new future checkpoint which becomes the current checkpoint only after the indexer run finished @@ -137,6 +139,26 @@ public void setShouldStopAtCheckpoint(boolean shouldStopAtCheckpoint) { this.shouldStopAtCheckpoint = shouldStopAtCheckpoint; } + int getPageSize() { + return pageSize; + } + + void setPageSize(int pageSize) { + this.pageSize = pageSize; + } + + void resetStatePersistenceFailureCount() { + statePersistenceFailureCount.set(0); + } + + int getStatePersistenceFailureCount() { + return statePersistenceFailureCount.get(); + } + + int incrementAndGetStatePersistenceFailureCount() { + return statePersistenceFailureCount.incrementAndGet(); + } + void shutdown() { taskListener.shutdown(); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java new file mode 100644 index 0000000000000..63baae88b691e --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -0,0 +1,275 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform.transforms; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.script.ScriptException; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.utils.ExceptionRootCauseFinder; + +import java.util.Optional; + +import static org.elasticsearch.core.Strings.format; + +/** + * Handles all failures a transform can run into when searching, indexing as well as internal + * state handling. 
+ * + * TODO: + * + * - the settings have to be passed as parameter - because they can change at runtime - longer term the necessary + * parts should be read from the context object instead + */ +class TransformFailureHandler { + private static final Logger logger = LogManager.getLogger(TransformFailureHandler.class); + public static final int LOG_FAILURE_EVERY = 10; + private final TransformAuditor auditor; + private final String transformId; + private final TransformContext context; + + TransformFailureHandler(TransformAuditor auditor, TransformContext context, String transformId) { + this.auditor = auditor; + this.transformId = transformId; + this.context = context; + } + + /** + * Handle a search or indexing failure + * + * @param e the exception caught + * @param settingsConfig The settings + */ + void handleIndexerFailure(Exception e, SettingsConfig settingsConfig) { + // more detailed reporting in the handlers and below + logger.debug(() -> "[" + transformId + "] transform encountered an exception: ", e); + Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(e); + boolean unattended = Boolean.TRUE.equals(settingsConfig.getUnattended()); + + if (unwrappedException instanceof CircuitBreakingException circuitBreakingException) { + handleCircuitBreakingException(circuitBreakingException, unattended); + } else if (unwrappedException instanceof ScriptException scriptException) { + handleScriptException(scriptException, unattended); + } else if (unwrappedException instanceof BulkIndexingException bulkIndexingException) { + handleBulkIndexingException(bulkIndexingException, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof ElasticsearchException elasticsearchException) { + handleElasticsearchException(elasticsearchException, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof IllegalArgumentException illegalArgumentException) { + handleIllegalArgumentException(illegalArgumentException, unattended); + } else { + retry( + unwrappedException, + ExceptionRootCauseFinder.getDetailedMessage(unwrappedException), + unattended, + getNumFailureRetries(settingsConfig) + ); + } + } + + /** + * Handle failures persisting internal state + * + * @param e the exception caught + * @param settingsConfig The settings + */ + boolean handleStatePersistenceFailure(Exception e, SettingsConfig settingsConfig) { + // we use the same setting for retries, however a separate counter, because the failure + // counter for search/index gets reset after a successful bulk index request + int numFailureRetries = getNumFailureRetries(settingsConfig); + + final int failureCount = context.incrementAndGetStatePersistenceFailureCount(); + + if (numFailureRetries != -1 && failureCount > numFailureRetries) { + fail( + "task encountered more than " + numFailureRetries + " failures updating internal state; latest failure: " + e.getMessage() + ); + return true; + } + return false; + } + + /** + * Handle the circuit breaking case: A search consumed too much memory and got aborted. + *

+     * When running out of memory we smoothly reduce the page size, which reduces memory consumption.
+     *

+ * Implementation details: We take the values from the circuit breaker as a hint and reduce + * either based on the circuitbreaker value or a log-scale value. + * + * @param circuitBreakingException CircuitBreakingException thrown + * @param unattended whether the transform runs in unattended mode + */ + private void handleCircuitBreakingException(CircuitBreakingException circuitBreakingException, boolean unattended) { + final int pageSize = context.getPageSize(); + double reducingFactor = Math.min( + (double) circuitBreakingException.getByteLimit() / circuitBreakingException.getBytesWanted(), + 1 - (Math.log10(pageSize) * 0.1) + ); + + int newPageSize = (int) Math.round(reducingFactor * pageSize); + + if (newPageSize < TransformIndexer.MINIMUM_PAGE_SIZE) { + String message = TransformMessages.getMessage(TransformMessages.LOG_TRANSFORM_PIVOT_LOW_PAGE_SIZE_FAILURE, pageSize); + if (unattended) { + retry(circuitBreakingException, message, true, -1); + } else { + fail(message); + } + } else { + String message = TransformMessages.getMessage(TransformMessages.LOG_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE, pageSize, newPageSize); + auditor.info(transformId, message); + logger.info("[{}] {}", transformId, message); + context.setPageSize(newPageSize); + } + } + + /** + * Handle script exception case. This is error is irrecoverable. + * + * @param scriptException ScriptException thrown + * @param unattended whether the transform runs in unattended mode + */ + private void handleScriptException(ScriptException scriptException, boolean unattended) { + String message = TransformMessages.getMessage( + TransformMessages.LOG_TRANSFORM_PIVOT_SCRIPT_ERROR, + scriptException.getDetailedMessage(), + scriptException.getScriptStack() + ); + if (unattended) { + retry(scriptException, message, true, -1); + } else { + fail(message); + } + } + + /** + * Handle bulk indexing exception case. This is error can be irrecoverable. + * + * @param bulkIndexingException BulkIndexingException thrown + * @param unattended whether the transform runs in unattended mode + * @param numFailureRetries the number of configured retries + */ + private void handleBulkIndexingException(BulkIndexingException bulkIndexingException, boolean unattended, int numFailureRetries) { + if (unattended == false && bulkIndexingException.isIrrecoverable()) { + String message = TransformMessages.getMessage( + TransformMessages.LOG_TRANSFORM_PIVOT_IRRECOVERABLE_BULK_INDEXING_ERROR, + bulkIndexingException.getDetailedMessage() + ); + fail(message); + } else { + retry(bulkIndexingException, bulkIndexingException.getDetailedMessage(), unattended, numFailureRetries); + } + } + + /** + * Handle a generic elasticsearch exception. This is error can be irrecoverable. + *

+     * The failure is classified using the HTTP status code from the exception.
+     *
+     * @param elasticsearchException ElasticsearchException thrown
+     * @param unattended whether the transform runs in unattended mode
+     * @param numFailureRetries the number of configured retries
+     */
+    private void handleElasticsearchException(ElasticsearchException elasticsearchException, boolean unattended, int numFailureRetries) {
+        if (unattended == false && ExceptionRootCauseFinder.IRRECOVERABLE_REST_STATUSES.contains(elasticsearchException.status())) {
+            String message = "task encountered irrecoverable failure: " + elasticsearchException.getDetailedMessage();
+            fail(message);
+        } else {
+            retry(elasticsearchException, elasticsearchException.getDetailedMessage(), unattended, numFailureRetries);
+        }
+    }
+
+    /**
+     * Handle a generic illegal argument exception. This error is irrecoverable.
+     *

+ * If this exception is caught it is likely a bug somewhere. + * + * @param illegalArgumentException IllegalArgumentException thrown + * @param unattended whether the transform runs in unattended mode + */ + private void handleIllegalArgumentException(IllegalArgumentException illegalArgumentException, boolean unattended) { + if (unattended) { + retry(illegalArgumentException, illegalArgumentException.getMessage(), true, -1); + } else { + String message = "task encountered irrecoverable failure: " + illegalArgumentException.getMessage(); + fail(message); + } + } + + /** + * Terminate failure handling with a retry. + *

+ * In case the number of retries are exhausted - and the transform does not run as unattended - the transform + * might be set to failed. + * + * @param unwrappedException The exception caught + * @param message error message to log/audit + * @param unattended whether the transform runs in unattended mode + * @param numFailureRetries the number of configured retries + */ + private void retry(Throwable unwrappedException, String message, boolean unattended, int numFailureRetries) { + // group failures to decide whether to report it below + final String thisFailureClass = unwrappedException.getClass().toString(); + final String lastFailureClass = context.getLastFailure(); + final int failureCount = context.incrementAndGetFailureCount(thisFailureClass); + + if (unattended == false && numFailureRetries != -1 && failureCount > numFailureRetries) { + fail("task encountered more than " + numFailureRetries + " failures; latest failure: " + message); + return; + } + + // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous + // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one + // and if the number of retries is about to exceed + if (thisFailureClass.equals(lastFailureClass) == false + || failureCount % LOG_FAILURE_EVERY == 0 + || failureCount == numFailureRetries) { + String retryMessage = format( + "Transform encountered an exception: [%s]; Will automatically retry [%d/%d]", + message, + failureCount, + numFailureRetries + ); + logger.warn(() -> "[" + transformId + "] " + retryMessage); + auditor.warning(transformId, retryMessage); + } + } + + /** + * Terminate failure handling by failing the transform. + *

+ * This should be called if the transform does not run unattended and the failure is permanent or after the + * configured number of retries. + * + * @param failureMessage the reason of the failure + */ + private void fail(String failureMessage) { + // note: logging and audit is done as part of context.markAsFailed + context.markAsFailed(failureMessage); + } + + /** + * Get the number of retries. + *

+ * The number of retries are read from the config or if not read from the context which is based on a cluster wide + * default. If the transform runs in unattended mode, the number of retries is always indefinite. + * + * @param settingsConfig the setting config + * @return the number of retries or -1 if retries are indefinite + */ + private int getNumFailureRetries(SettingsConfig settingsConfig) { + return Boolean.TRUE.equals(settingsConfig.getUnattended()) + ? -1 + : Optional.ofNullable(settingsConfig.getNumFailureRetries()).orElse(context.getNumFailureRetries()); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java index 43500a70a4a46..d69446fff7795 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Tuple; @@ -26,7 +25,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; @@ -48,14 +46,12 @@ import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import org.elasticsearch.xpack.transform.transforms.Function.ChangeCollector; import org.elasticsearch.xpack.transform.transforms.RetentionPolicyToDeleteByQueryRequestConverter.RetentionPolicyException; -import org.elasticsearch.xpack.transform.utils.ExceptionRootCauseFinder; import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -90,6 +86,7 @@ private enum RunState { protected final TransformConfigManager transformsConfigManager; private final CheckpointProvider checkpointProvider; + protected final TransformFailureHandler failureHandler; private volatile float docsPerSecond = -1; protected final TransformAuditor auditor; @@ -114,7 +111,6 @@ private enum RunState { private Map nextChangeCollectorBucketPosition = null; private volatile Integer initialConfiguredPageSize; - private volatile int pageSize = 0; private volatile long logEvery = 1; private volatile long logCount = 0; private volatile TransformCheckpoint lastCheckpoint; @@ -154,6 +150,7 @@ public TransformIndexer( // give runState a default this.runState = RunState.APPLY_RESULTS; + this.failureHandler = new TransformFailureHandler(auditor, context, transformConfig.getId()); if (transformConfig.getSettings() != null && transformConfig.getSettings().getDocsPerSecond() != null) { docsPerSecond = transformConfig.getSettings().getDocsPerSecond(); } 
@@ -169,10 +166,6 @@ public TransformIndexer( abstract void persistState(TransformState state, ActionListener listener); - public int getPageSize() { - return pageSize; - } - @Override protected String getJobId() { return transformConfig.getId(); @@ -260,7 +253,7 @@ protected void onStart(long now, ActionListener listener) { ActionListener finalListener = ActionListener.wrap(r -> { try { // if we haven't set the page size yet, if it is set we might have reduced it after running into an out of memory - if (pageSize == 0) { + if (context.getPageSize() == 0) { configurePageSize(getConfig().getSettings().getMaxPageSearchSize()); } @@ -414,7 +407,7 @@ protected void onFinish(ActionListener listener) { } ActionListener failureHandlingListener = ActionListener.wrap(listener::onResponse, failure -> { - handleFailure(failure); + failureHandler.handleIndexerFailure(failure, getConfig().getSettings()); listener.onFailure(failure); }); @@ -505,7 +498,7 @@ private void executeRetentionPolicy(ActionListener listener) { private void finalizeCheckpoint(ActionListener listener) { try { // reset the page size, so we do not memorize a low page size forever - pageSize = function.getInitialPageSize(); + context.setPageSize(function.getInitialPageSize()); // reset the changed bucket to free memory if (changeCollector != null) { changeCollector.clear(); @@ -631,7 +624,7 @@ protected void onFailure(Exception exc) { startIndexerThreadShutdown(); // the failure handler must not throw an exception due to internal problems try { - handleFailure(exc); + failureHandler.handleIndexerFailure(exc, getConfig().getSettings()); } catch (Exception e) { logger.error(() -> "[" + getJobId() + "] transform encountered an unexpected internal exception: ", e); } @@ -922,69 +915,7 @@ void stopAndMaybeSaveState() { * (Note: originally this method was synchronized, which is not necessary) */ void handleFailure(Exception e) { - // more detailed reporting in the handlers and below - logger.debug(() -> "[" + getJobId() + "] transform encountered an exception: ", e); - Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(e); - - if (unwrappedException instanceof CircuitBreakingException) { - handleCircuitBreakingException((CircuitBreakingException) unwrappedException); - return; - } - - if (unwrappedException instanceof ScriptException) { - handleScriptException((ScriptException) unwrappedException); - return; - } - - if (unwrappedException instanceof BulkIndexingException && ((BulkIndexingException) unwrappedException).isIrrecoverable()) { - handleIrrecoverableBulkIndexingException((BulkIndexingException) unwrappedException); - return; - } - - // irrecoverable error without special handling - if (unwrappedException instanceof ElasticsearchException elasticsearchException) { - if (ExceptionRootCauseFinder.IRRECOVERABLE_REST_STATUSES.contains(elasticsearchException.status())) { - failIndexer("task encountered irrecoverable failure: " + elasticsearchException.getDetailedMessage()); - return; - } - } - - if (unwrappedException instanceof IllegalArgumentException) { - failIndexer("task encountered irrecoverable failure: " + e.getMessage()); - return; - } - - int numFailureRetries = Optional.ofNullable(transformConfig.getSettings().getNumFailureRetries()) - .orElse(context.getNumFailureRetries()); - - // group failures to decide whether to report it below - final String thisFailureClass = unwrappedException.getClass().toString(); - final String lastFailureClass = context.getLastFailure(); - final int failureCount = 
context.incrementAndGetFailureCount(thisFailureClass); - - if (numFailureRetries != -1 && failureCount > numFailureRetries) { - failIndexer( - "task encountered more than " - + numFailureRetries - + " failures; latest failure: " - + ExceptionRootCauseFinder.getDetailedMessage(unwrappedException) - ); - return; - } - - // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous - // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one - // and if the number of retries is about to exceed - if (thisFailureClass.equals(lastFailureClass) == false || failureCount == numFailureRetries) { - String message = format( - "Transform encountered an exception: [%s]; Will automatically retry [%d/%d]", - ExceptionRootCauseFinder.getDetailedMessage(unwrappedException), - failureCount, - numFailureRetries - ); - logger.warn(() -> "[" + getJobId() + "] " + message); - auditor.warning(getJobId(), message); - } + failureHandler.handleIndexerFailure(e, getConfig().getSettings()); } /** @@ -1143,7 +1074,7 @@ private SearchRequest buildQueryToFindChanges() { request.allowPartialSearchResults(false) // shard failures should fail the request .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // TODO: make configurable - changeCollector.buildChangesQuery(sourceBuilder, position != null ? position.getBucketsPosition() : null, pageSize); + changeCollector.buildChangesQuery(sourceBuilder, position != null ? position.getBucketsPosition() : null, context.getPageSize()); QueryBuilder queryBuilder = getConfig().getSource().getQueryConfig().getQuery(); @@ -1164,7 +1095,7 @@ private SearchRequest buildQueryToUpdateDestinationIndex() { TransformConfig config = getConfig(); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().runtimeMappings(getConfig().getSource().getRuntimeMappings()); - function.buildSearchQuery(sourceBuilder, position != null ? position.getIndexerPosition() : null, pageSize); + function.buildSearchQuery(sourceBuilder, position != null ? position.getIndexerPosition() : null, context.getPageSize()); SearchRequest request = new SearchRequest(); QueryBuilder queryBuilder = config.getSource().getQueryConfig().getQuery(); @@ -1207,62 +1138,6 @@ private SearchRequest buildQueryToUpdateDestinationIndex() { .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // TODO: make configurable } - /** - * Handle the circuit breaking case: A search consumed to much memory and got aborted. - * - * Going out of memory we smoothly reduce the page size which reduces memory consumption. 
- * - * Implementation details: We take the values from the circuit breaker as a hint, but - * note that it breaks early, that's why we also reduce using - * - * @param circuitBreakingException CircuitBreakingException thrown - */ - private void handleCircuitBreakingException(CircuitBreakingException circuitBreakingException) { - double reducingFactor = Math.min( - (double) circuitBreakingException.getByteLimit() / circuitBreakingException.getBytesWanted(), - 1 - (Math.log10(pageSize) * 0.1) - ); - - int newPageSize = (int) Math.round(reducingFactor * pageSize); - - if (newPageSize < MINIMUM_PAGE_SIZE) { - String message = TransformMessages.getMessage(TransformMessages.LOG_TRANSFORM_PIVOT_LOW_PAGE_SIZE_FAILURE, pageSize); - failIndexer(message); - } else { - String message = TransformMessages.getMessage(TransformMessages.LOG_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE, pageSize, newPageSize); - auditor.info(getJobId(), message); - logger.info("[{}] {}", getJobId(), message); - pageSize = newPageSize; - } - } - - /** - * Handle script exception case. This is error is irrecoverable. - * - * @param scriptException ScriptException thrown - */ - private void handleScriptException(ScriptException scriptException) { - String message = TransformMessages.getMessage( - TransformMessages.LOG_TRANSFORM_PIVOT_SCRIPT_ERROR, - scriptException.getDetailedMessage(), - scriptException.getScriptStack() - ); - failIndexer(message); - } - - /** - * Handle permanent bulk indexing exception case. This is error is irrecoverable. - * - * @param bulkIndexingException BulkIndexingException thrown - */ - private void handleIrrecoverableBulkIndexingException(BulkIndexingException bulkIndexingException) { - String message = TransformMessages.getMessage( - TransformMessages.LOG_TRANSFORM_PIVOT_IRRECOVERABLE_BULK_INDEXING_ERROR, - bulkIndexingException.getDetailedMessage() - ); - failIndexer(message); - } - protected void failIndexer(String failureMessage) { // note: logging and audit is done as part of context.markAsFailed context.markAsFailed(failureMessage); @@ -1311,9 +1186,9 @@ private void configurePageSize(Integer newPageSize) { // if the user explicitly set a page size, take it from the config, otherwise let the function decide if (initialConfiguredPageSize != null && initialConfiguredPageSize > 0) { - pageSize = initialConfiguredPageSize; + context.setPageSize(initialConfiguredPageSize); } else { - pageSize = function.getInitialPageSize(); + context.setPageSize(function.getInitialPageSize()); } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java new file mode 100644 index 0000000000000..6a66d0b53fdf9 --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.transform.transforms; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.ScriptException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import org.elasticsearch.xpack.transform.notifications.MockTransformAuditor; + +import static java.util.Collections.singletonList; + +public class TransformFailureHandlerTests extends ESTestCase { + + static class MockTransformContextListener implements TransformContext.Listener { + + private boolean failed = false; + private int failureCountChangedCounter = 0; + + public void reset() { + failed = false; + failureCountChangedCounter = 0; + } + + @Override + public void shutdown() { + + } + + @Override + public void failureCountChanged() { + failureCountChangedCounter++; + } + + @Override + public void fail(String failureMessage, ActionListener listener) { + failed = true; + } + + public boolean getFailed() { + return failed; + } + + public int getFailureCountChangedCounter() { + return failureCountChangedCounter; + } + } + + public void testUnattended() { + String transformId = randomAlphaOfLength(10); + SettingsConfig settings = new SettingsConfig.Builder().setUnattended(true).build(); + + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + MockTransformContextListener contextListener = new MockTransformContextListener(); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + context.setPageSize(500); + + TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); + + handler.handleIndexerFailure( + new SearchPhaseExecutionException( + "query", + "Partial shards failure", + new ShardSearchFailure[] { + new ShardSearchFailure(new CircuitBreakingException("to much memory", 110, 100, CircuitBreaker.Durability.TRANSIENT)) } + ), + settings + ); + + // CBE isn't a failure, but it only affects page size(which we don't test here) + assertFalse(contextListener.getFailed()); + assertEquals(0, contextListener.getFailureCountChangedCounter()); + + assertNoFailure( + handler, + new SearchPhaseExecutionException( + "query", + "Partial shards failure", + new ShardSearchFailure[] { + new ShardSearchFailure( + new ScriptException( + "runtime error", + new ArithmeticException("/ by zero"), + singletonList("stack"), + "test", + "painless" + ) + ) } + ), + contextListener, + settings + ); + assertNoFailure( + handler, + new ElasticsearchStatusException("something really bad happened", RestStatus.INTERNAL_SERVER_ERROR), + contextListener, + settings + ); + assertNoFailure(handler, new IllegalArgumentException("expected apples not oranges"), contextListener, settings); + assertNoFailure(handler, new RuntimeException("the s*** hit the fan"), contextListener, settings); + assertNoFailure(handler, new NullPointerException("NPE"), contextListener, settings); + } + + private void assertNoFailure( + TransformFailureHandler handler, + Exception e, + MockTransformContextListener mockTransformContextListener, + 
SettingsConfig settings + ) { + handler.handleIndexerFailure(e, settings); + assertFalse(mockTransformContextListener.getFailed()); + assertEquals(1, mockTransformContextListener.getFailureCountChangedCounter()); + mockTransformContextListener.reset(); + } + +} diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index 53fe8fbc1489a..5a18397dac078 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -71,7 +71,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -86,12 +85,16 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.matchesRegex; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.matches; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; +/** + * Tests various indexer failure cases. + *

+ * Legacy Warning: These tests have been written before {@link TransformFailureHandler} has been created, + * potentially a lot of these tests can be rewritten. For new test cases use {@link TransformFailureHandlerTests} + * if possible. + */ public class TransformIndexerFailureHandlingTests extends ESTestCase { private Client client; @@ -103,8 +106,6 @@ static class MockedTransformIndexer extends ClientTransformIndexer { private final Function bulkFunction; private final Function deleteByQueryFunction; - private final Consumer failureConsumer; - // used for synchronizing with the test private CountDownLatch latch; @@ -121,8 +122,7 @@ static class MockedTransformIndexer extends ClientTransformIndexer { TransformContext context, Function searchFunction, Function bulkFunction, - Function deleteByQueryFunction, - Consumer failureConsumer + Function deleteByQueryFunction ) { super( threadPool, @@ -148,7 +148,6 @@ static class MockedTransformIndexer extends ClientTransformIndexer { this.searchFunction = searchFunction; this.bulkFunction = bulkFunction; this.deleteByQueryFunction = deleteByQueryFunction; - this.failureConsumer = failureConsumer; } public void initialize() { @@ -227,16 +226,6 @@ protected void onAbort() { fail("onAbort should not be called"); } - @Override - protected void failIndexer(String message) { - if (failureConsumer != null) { - failureConsumer.accept(message); - super.failIndexer(message); - } else { - fail("failIndexer should not be called, received error: " + message); - } - } - @Override void doGetInitialProgress(SearchRequest request, ActionListener responseListener) { responseListener.onResponse( @@ -312,7 +301,7 @@ public void testPageSizeAdapt() throws Exception { randomPivotConfig(), null, randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), - new SettingsConfig(pageSize, null, (Boolean) null, null, null, null, null), + new SettingsConfig.Builder().setMaxPageSearchSize(pageSize).build(), null, null, null, @@ -340,7 +329,6 @@ public void testPageSizeAdapt() throws Exception { searchFunction, bulkFunction, null, - null, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -354,13 +342,13 @@ public void testPageSizeAdapt() throws Exception { latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.MINUTES); - long pageSizeAfterFirstReduction = indexer.getPageSize(); + long pageSizeAfterFirstReduction = context.getPageSize(); assertThat(initialPageSize, greaterThan(pageSizeAfterFirstReduction)); assertThat(pageSizeAfterFirstReduction, greaterThan((long) TransformIndexer.MINIMUM_PAGE_SIZE)); // run indexer a 2nd time final CountDownLatch secondRunLatch = indexer.newLatch(1); - assertEquals(pageSizeAfterFirstReduction, indexer.getPageSize()); + assertEquals(pageSizeAfterFirstReduction, context.getPageSize()); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); // when the indexer thread shuts down, it ignores the trigger, we might have to call it again @@ -371,7 +359,7 @@ public void testPageSizeAdapt() throws Exception { assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED))); // assert that page size has been reduced again - assertThat(pageSizeAfterFirstReduction, greaterThan((long) indexer.getPageSize())); + assertThat(pageSizeAfterFirstReduction, greaterThan((long) context.getPageSize())); assertThat(pageSizeAfterFirstReduction, greaterThan((long) TransformIndexer.MINIMUM_PAGE_SIZE)); } @@ -387,7 +375,7 @@ public void testDoProcessAggNullCheck() { randomPivotConfig(), 
null, randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), - new SettingsConfig(pageSize, null, (Boolean) null, null, null, null, null), + new SettingsConfig.Builder().setMaxPageSearchSize(pageSize).build(), null, null, null, @@ -425,7 +413,6 @@ public void testDoProcessAggNullCheck() { searchFunction, bulkFunction, null, - null, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -451,7 +438,7 @@ public void testScriptError() throws Exception { randomPivotConfig(), null, randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), - new SettingsConfig(pageSize, null, (Boolean) null, null, null, null, null), + new SettingsConfig.Builder().setMaxPageSearchSize(pageSize).build(), null, null, null, @@ -480,13 +467,9 @@ public void testScriptError() throws Exception { final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); final AtomicReference failureMessage = new AtomicReference<>(); - Consumer failureConsumer = message -> { - failIndexerCalled.compareAndSet(false, true); - failureMessage.compareAndSet(null, message); - }; MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); - TransformContext.Listener contextListener = mock(TransformContext.Listener.class); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); MockedTransformIndexer indexer = createMockIndexer( @@ -495,7 +478,6 @@ public void testScriptError() throws Exception { searchFunction, bulkFunction, null, - failureConsumer, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -512,11 +494,6 @@ public void testScriptError() throws Exception { latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); assertTrue(failIndexerCalled.get()); - verify(contextListener, times(1)).fail( - matches("Failed to execute script with error: \\[.*ArithmeticException: / by zero\\], stack trace: \\[stack\\]"), - any() - ); - assertThat( failureMessage.get(), matchesRegex("Failed to execute script with error: \\[.*ArithmeticException: / by zero\\], stack trace: \\[stack\\]") @@ -580,13 +557,9 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); final AtomicReference failureMessage = new AtomicReference<>(); - Consumer failureConsumer = message -> { - failIndexerCalled.compareAndSet(false, true); - failureMessage.compareAndSet(null, message); - }; MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); - TransformContext.Listener contextListener = mock(TransformContext.Listener.class); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); MockedTransformIndexer indexer = createMockIndexer( @@ -595,7 +568,6 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti searchFunction, bulkFunction, deleteByQueryFunction, - failureConsumer, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -612,11 +584,6 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); assertTrue(failIndexerCalled.get()); - verify(contextListener, times(1)).fail( - matches("task encountered irrecoverable 
failure: org.elasticsearch.ElasticsearchParseException: failed to parse date field;.*"), - any() - ); - assertThat( failureMessage.get(), matchesRegex( @@ -679,10 +646,6 @@ public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); final AtomicReference failureMessage = new AtomicReference<>(); - Consumer failureConsumer = message -> { - failIndexerCalled.compareAndSet(false, true); - failureMessage.compareAndSet(null, message); - }; MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); auditor.addExpectation( @@ -694,7 +657,7 @@ public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce + " Will automatically retry [1/10]" ) ); - TransformContext.Listener contextListener = mock(TransformContext.Listener.class); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); MockedTransformIndexer indexer = createMockIndexer( @@ -703,7 +666,6 @@ public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce searchFunction, bulkFunction, deleteByQueryFunction, - failureConsumer, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -786,13 +748,9 @@ public SearchResponse apply(SearchRequest searchRequest) { final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); final AtomicReference failureMessage = new AtomicReference<>(); - Consumer failureConsumer = message -> { - failIndexerCalled.compareAndSet(false, true); - failureMessage.compareAndSet(null, message); - }; MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); - TransformContext.Listener contextListener = mock(TransformContext.Listener.class); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); MockedTransformIndexer indexer = createMockIndexer( @@ -801,7 +759,6 @@ public SearchResponse apply(SearchRequest searchRequest) { searchFunction, bulkFunction, null, - failureConsumer, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -852,13 +809,9 @@ public void testHandleFailureAuditing() { final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); final AtomicReference failureMessage = new AtomicReference<>(); - Consumer failureConsumer = message -> { - failIndexerCalled.compareAndSet(false, true); - failureMessage.compareAndSet(null, message); - }; MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); - TransformContext.Listener contextListener = mock(TransformContext.Listener.class); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); auditor.addExpectation( @@ -911,7 +864,6 @@ public void testHandleFailureAuditing() { searchFunction, bulkFunction, null, - failureConsumer, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -1016,13 +968,9 @@ private void testHandleFailure( final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); final AtomicReference failureMessage = new AtomicReference<>(); - Consumer failureConsumer = message -> { - failIndexerCalled.compareAndSet(false, true); - failureMessage.compareAndSet(null, message); - }; MockTransformAuditor auditor = 
MockTransformAuditor.createMockAuditor(); - TransformContext.Listener contextListener = mock(TransformContext.Listener.class); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); if (contextNumFailureRetries != null) { context.setNumFailureRetries(contextNumFailureRetries); @@ -1047,7 +995,6 @@ private void testHandleFailure( searchFunction, bulkFunction, null, - failureConsumer, threadPool, ThreadPool.Names.GENERIC, auditor, @@ -1084,7 +1031,6 @@ private MockedTransformIndexer createMockIndexer( Function searchFunction, Function bulkFunction, Function deleteByQueryFunction, - Consumer failureConsumer, ThreadPool threadPool, String executorName, TransformAuditor auditor, @@ -1110,12 +1056,29 @@ private MockedTransformIndexer createMockIndexer( context, searchFunction, bulkFunction, - deleteByQueryFunction, - failureConsumer + deleteByQueryFunction ); indexer.initialize(); return indexer; } + private TransformContext.Listener createContextListener( + final AtomicBoolean failIndexerCalled, + final AtomicReference failureMessage + ) { + return new TransformContext.Listener() { + @Override + public void shutdown() {} + + @Override + public void failureCountChanged() {} + + @Override + public void fail(String message, ActionListener listener) { + assertTrue(failIndexerCalled.compareAndSet(false, true)); + assertTrue(failureMessage.compareAndSet(null, message)); + } + }; + } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java index 82141503795fb..cedd8c8651317 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java @@ -97,7 +97,7 @@ protected boolean triggerSaveState() { } public int getStatePersistenceFailures() { - return statePersistenceFailures.get(); + return context.getStatePersistenceFailureCount(); } } @@ -183,7 +183,8 @@ public void testStatePersistenceErrorHandling() throws InterruptedException { randomBoolean(), randomBoolean(), randomBoolean(), - 2 + 2, + false ) ); AtomicReference state = new AtomicReference<>(TransformTaskState.STARTED); @@ -398,7 +399,8 @@ public void testStatePersistenceRecovery() throws InterruptedException { randomBoolean(), randomBoolean(), randomBoolean(), - 2 + 2, + false ) ); AtomicReference state = new AtomicReference<>(TransformTaskState.STARTED); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java index 90f5ee5a14735..e7d22209e4908 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java @@ -507,7 +507,7 @@ public void testStopAtCheckpointForThrottledTransform() throws Exception { randomPivotConfig(), null, randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 1000), - new SettingsConfig(null, Float.valueOf(1.0f), (Boolean) null, (Boolean) null, null, null, null), + new SettingsConfig.Builder().setRequestsPerSecond(1.0f).build(), null, null, null, diff --git a/x-pack/plugin/transform/src/test/resources/rest-api-spec/schema/transform_config.schema.json b/x-pack/plugin/transform/src/test/resources/rest-api-spec/schema/transform_config.schema.json index 3687f2601c883..21c2d85a9fd6b 100644 --- a/x-pack/plugin/transform/src/test/resources/rest-api-spec/schema/transform_config.schema.json +++ b/x-pack/plugin/transform/src/test/resources/rest-api-spec/schema/transform_config.schema.json @@ -208,6 +208,13 @@ "$id": "#root/settings/num_failure_retries", "title": "num failure retries", "type": "integer" + }, + "unattended": { + "$id": "#root/settings/unattended", + "title": "unattended", + "type": "boolean", + "description": "run this transform in unattended mode", + "default": false } } }, From 89ff87d20c2b02051c4f82cef006d1fd1f25681a Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 11 Aug 2022 17:07:00 +0200 Subject: [PATCH 177/265] Fix CloseIndexIT.testConcurrentClose (#89173) Closes #88936 --- .../indices/state/CloseIndexIT.java | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index a847c6e848e29..20016c546d622 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; @@ -20,9 +21,11 @@ import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -55,6 +58,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -187,7 +191,22 @@ public void testConcurrentClose() throws InterruptedException { .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) .collect(toList()) ); - ensureYellowAndNoInitializingShards(indexName); + + ClusterHealthResponse healthResponse = client().admin() + .cluster() + .prepareHealth(indexName) + .setWaitForYellowStatus() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + 
.setWaitForNodes(Integer.toString(cluster().size())) + .setTimeout(TimeValue.timeValueSeconds(60L)) + .get(); + if (healthResponse.isTimedOut()) { + logClusterState(); + } + assertThat(healthResponse.isTimedOut(), equalTo(false)); + assertThat(healthResponse.getIndices().get(indexName).getStatus().value(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW.value())); final CountDownLatch startClosing = new CountDownLatch(1); final Thread[] threads = new Thread[randomIntBetween(2, 5)]; From 9ad91f20e5cf586254e04a5612ea795229da94f5 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 11 Aug 2022 10:54:02 -0700 Subject: [PATCH 178/265] Add Amazon 2022 to platform support testing matrix --- .../elastic+elasticsearch+multijob+platform-support-unix.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml index 62dda744b4c9d..6aec8b04a2f30 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml @@ -14,7 +14,8 @@ name: os values: - "centos-7&&immutable" - - "amazon&&immutable" + - "amazon-2&&immutable" + - "amazon-2022&&immutable" - "debian-10&&immutable" - "debian-11&&immutable" - "opensuse-15-1&&immutable" From e063ce821c29b92bbf486090b88d83502bb9c18e Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Fri, 12 Aug 2022 15:43:11 +1000 Subject: [PATCH 179/265] [DOCS] Separate "user lookup" into its own doc (#88533) When we introduced "authorization delegation" we piggy backed on the implementation and documentation for "run as". The authorization delegation docs just link to "run as" in order to explain which realms support being the target of delegation. However, authorization delegation is now at least as popular as "run as", and forcing people to make sense of the "run as" docs and apply them to their delegation authorization ("authorization_realm") scenario wasn't helpful or clear. This commit moves (and improves) the content for "lookup a user without authentication" into a new page within the authentication section, and links to it from both "run as" and "delegated authorization". 
Co-authored-by: Adam Locke --- .../security/authentication/overview.asciidoc | 1 + .../authentication/realm-chains.asciidoc | 2 +- .../token-authentication-services.asciidoc | 2 + .../authentication/user-lookup.asciidoc | 66 +++++++++++++++++++ ...figuring-authorization-delegation.asciidoc | 4 +- .../authorization/run-as-privilege.asciidoc | 21 +++--- 6 files changed, 83 insertions(+), 13 deletions(-) create mode 100644 x-pack/docs/en/security/authentication/user-lookup.asciidoc diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc index 54e1e1dcbad79..96646d30b6ec0 100644 --- a/x-pack/docs/en/security/authentication/overview.asciidoc +++ b/x-pack/docs/en/security/authentication/overview.asciidoc @@ -51,6 +51,7 @@ include::kerberos-realm.asciidoc[] include::jwt-realm.asciidoc[] include::custom-realm.asciidoc[] include::anonymous-access.asciidoc[] +include::user-lookup.asciidoc[] include::user-cache.asciidoc[] include::saml-guide.asciidoc[leveloffset=+1] include::oidc-guide.asciidoc[leveloffset=+1] diff --git a/x-pack/docs/en/security/authentication/realm-chains.asciidoc b/x-pack/docs/en/security/authentication/realm-chains.asciidoc index 08da78b14442a..0a9370f2589c2 100644 --- a/x-pack/docs/en/security/authentication/realm-chains.asciidoc +++ b/x-pack/docs/en/security/authentication/realm-chains.asciidoc @@ -78,7 +78,7 @@ LDAP group assignments to determine their roles in Elasticsearch. Any realm that supports retrieving users (without needing their credentials) can be used as an _authorization realm_ (that is, its name may appear as one of the -values in the list of `authorization_realms`). See <> for +values in the list of `authorization_realms`). See <> for further explanation on which realms support this. For realms that support this feature, it can be enabled by configuring the diff --git a/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc b/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc index f7cab7e48344d..8e49ab678f087 100644 --- a/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc +++ b/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc @@ -33,6 +33,7 @@ curl -H "Authorization: Bearer AAEAAWVsYXN0aWMvZ...mXQtc2VydmMTpyNXdkYmRib1FTZTl include::service-accounts.asciidoc[tag=service-accounts-usage] -- +[[token-authentication-access-token]] _token-service_:: The token service uses the <> to generate access tokens and refresh tokens based on the OAuth2 specification. @@ -51,6 +52,7 @@ curl -H "Authorization: Bearer dGhpcyBpcyBub3Qx5...F0YS4gZG8gbm90IHRyeSB0byByZWF // NOTCONSOLE -- +[[token-authentication-api-key]] _api-key-service_:: The API key service uses the <> to generate API keys. diff --git a/x-pack/docs/en/security/authentication/user-lookup.asciidoc b/x-pack/docs/en/security/authentication/user-lookup.asciidoc new file mode 100644 index 0000000000000..179abd2de1e5c --- /dev/null +++ b/x-pack/docs/en/security/authentication/user-lookup.asciidoc @@ -0,0 +1,66 @@ +[role="xpack"] +[[user-lookup]] +=== Looking up users without authentication + +{es} <> exist primarily to support +<>. +Some realms authenticate users with a password (such as the +<> and <> realms), and other realms use +more complex authentication protocols (such as the <> and +<> realms). +In each case, the _primary_ purpose of the realm is to establish the identity of +the user who has made a request to the {es} API. 
+ +However, some {es} features need to _look up_ a user without using their credentials. + +- The <> feature executes requests on behalf of + another user. An authenticated user with `run_as` privileges can perform + requests on behalf of another unauthenticated user. + +- The <> feature links two realms + together so that a user who authenticates against one realm can have the roles + and metadata associated with a user from a different realm. + +In each of these cases, a user must first authenticate to one realm and then +{es} will query the second realm to find another user. +The authenticated user credentials are used to authenticate in the first realm only, +The user in the second realm is retrieved by username, without needing credentials. + +When {es} resolves a user using their credentials (as performed in the first realm), +it is known as _user authentication_. + +When {es} resolves a user using the username only (as performed in the second realm), +it is known as _user lookup_. + +See the <> and <> +documentation to learn more about these features, including which realms and authentication +methods support `run_as` or delegated authorization. +In both cases, only the following realms can be used for the user lookup: + +* The reserved, <> and <> realms always +support user lookup. +* The <> realm supports user lookup when the realm is configured +in <>. User lookup is not support +when the realm is configured with `user_dn_templates`. +* User lookup support in the <> realm +requires that the realm be configured with a <> and a +bind password. + +The `pki`, `saml`, `oidc`, `kerberos` and `jwt` realms do not support user +lookup. + +NOTE: If you want to use a realm only for user lookup and prevent users from +authenticating against that realm, you can <> +and set `authentication.enabled` to `false` + +The user lookup feature is an internal capability that is used to implement the +`run-as` and delegated authorization features - there are no APIs for user lookup. +If you wish to test your user lookup configuration, then you can do this with +`run_as`. Use the <> API, authenticate as a +`superuser` (e.g. the builtin `elastic` user) and specify the +<>. + +NOTE: The <> API and <> feature are alternative + ways to retrieve information about a {stack} user. Those APIs are not related + to the user lookup feature. + diff --git a/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc b/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc index eda2800dceb1b..11c3f86613500 100644 --- a/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc +++ b/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc @@ -4,8 +4,8 @@ In some cases, after the user has been authenticated by a realm, we may want to delegate user lookup and assignment of roles to another realm. -Any realm that supports retrieving users (without needing their credentials) -can be used as an authorization realm. +Any realm that supports <> (without needing the +user's credentials) can be used as an authorization realm. For example, a user that is authenticated by the Kerberos realm can be looked up in the LDAP realm. 
The LDAP realm takes on responsibility for searching the user diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc index 5a9fbecf92ab0..093a8814d8f02 100644 --- a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc +++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc @@ -30,15 +30,16 @@ support `run_as` delegation. `run_as` user:: -- -For the `run_as` user, the the following realms support delegated -`run_as` lookups by username: `native`, `file`, Active Directory, LDAP. - -NOTE: To support `run_as` in the LDAP realm, you have to run in -<>. For Active Directory, you need -to <>. - -Service tokens, the {es} Token Service, PKI, SAML 2.0, OIDC 1.0, Kerberos, JWT, -and API keys do not support delegated `run_as` lookups. +{es} supports `run_as` for any realm that supports user lookup. +Not all realms support user lookup. Refer to the list of <> +and ensure that the realm you wish to use is configured in a manner that +supports user lookup. + +The `run_as` user must be retrieved from a <> - it is not +possible to run as a +<>, +<> or +<>. -- To submit requests on behalf of other users, you need to have the `run_as` @@ -216,4 +217,4 @@ The `authentication_realm` and `lookup_realm` in the response both specify the `native` realm because both the `admin_user` and `analyst_user` are from that realm. If the two users are in different realms, the values for `authentication_realm` and `lookup_realm` are different (such as `pki` and -`native`). \ No newline at end of file +`native`). From 96febb7d1ae504a669af9b31aac3c77a1b3a676f Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 12 Aug 2022 16:01:35 +1000 Subject: [PATCH 180/265] Ensure secureString remain open when reloading secure settings (#88922) The reloading secure settings action sends one node level request with password (secureString) to each node. These node level requests share the same secureString instance. This is not a problem when the requests are sent across the wire because the secureString will end up to be independent instances after de/serilization. But when the request is handled by the local node, it skips the de/serialization process. This means when the secureString gets closed, it is closed for all the node level requests. If a node level request has not been sent across wire when the secureString is closed under it, the serialization process will result into error. This PR fixes the bug by letting each Node level request creates a clone of the secureString and have the Nodes level request to track all Node level requests. All copies of secureString (on the coordinate node) will be closed at Nodes request level which is safe because it is after completion of all Node level requests. 
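In outline, the two relevant pieces look like the sketch below (simplified from the NodesReloadSecureSettingsRequest change in the diff that follows; comments added here to spell out the reasoning):

    // Nodes-level request: each node-level request is handed its own copy of the password
    NodeRequest newNodeRequest() {
        NodesReloadSecureSettingsRequest clone = new NodesReloadSecureSettingsRequest(nodesIds());
        if (hasPassword()) {
            clone.setSecureStorePassword(getSecureSettingsPassword().clone()); // independent SecureString copy
        }
        NodeRequest nodeRequest = new NodeRequest(clone);
        nodeRequests.add(nodeRequest); // tracked so the parent can release it later
        return nodeRequest;
    }

    // Closing the nodes-level request releases the original password and every tracked copy.
    // This is safe because it only runs after all node-level requests have completed.
    public void close() {
        if (secureSettingsPassword != null) {
            secureSettingsPassword.close();
        }
        nodeRequests.forEach(NodeRequest::close);
    }
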
Resolves: #88887 --- docs/changelog/88922.yaml | 5 ++ .../NodesReloadSecureSettingsRequest.java | 58 ++++++++++++++++--- ...nsportNodesReloadSecureSettingsAction.java | 53 ++++------------- .../RestReloadSecureSettingsAction.java | 2 +- 4 files changed, 69 insertions(+), 49 deletions(-) create mode 100644 docs/changelog/88922.yaml diff --git a/docs/changelog/88922.yaml b/docs/changelog/88922.yaml new file mode 100644 index 0000000000000..4998a2e8c3c38 --- /dev/null +++ b/docs/changelog/88922.yaml @@ -0,0 +1,5 @@ +pr: 88922 +summary: Ensure `secureString` remain open when reloading secure settings +area: Security +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 9f2b9c597dc7e..d067b43ee95d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -16,14 +16,18 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.CharArrays; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.transport.TransportRequest; import java.io.IOException; import java.util.Arrays; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; /** * Request for a reload secure settings action */ -public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { +public class NodesReloadSecureSettingsRequest extends BaseNodesRequest implements Releasable { /** * The password is used to re-read and decrypt the contents @@ -70,12 +74,6 @@ public void setSecureStorePassword(SecureString secureStorePassword) { this.secureSettingsPassword = secureStorePassword; } - public void closePassword() { - if (this.secureSettingsPassword != null) { - this.secureSettingsPassword.close(); - } - } - boolean hasPassword() { return this.secureSettingsPassword != null && this.secureSettingsPassword.length() > 0; } @@ -94,4 +92,50 @@ public void writeTo(StreamOutput out) throws IOException { } } } + + // This field is intentionally not part of serialization + private final Set nodeRequests = ConcurrentHashMap.newKeySet(); + + NodeRequest newNodeRequest() { + final NodesReloadSecureSettingsRequest clone = new NodesReloadSecureSettingsRequest(nodesIds()); + if (hasPassword()) { + clone.setSecureStorePassword(getSecureSettingsPassword().clone()); + } + final NodeRequest nodeRequest = new NodeRequest(clone); + nodeRequests.add(nodeRequest); + return nodeRequest; + } + + @Override + public void close() { + if (this.secureSettingsPassword != null) { + this.secureSettingsPassword.close(); + } + nodeRequests.forEach(NodeRequest::close); + } + + public static class NodeRequest extends TransportRequest implements Releasable { + NodesReloadSecureSettingsRequest request; + + NodeRequest(StreamInput in) throws IOException { + super(in); + request = new NodesReloadSecureSettingsRequest(in); + } + + NodeRequest(NodesReloadSecureSettingsRequest request) { + this.request = request; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + + @Override + public void close() { + assert request.nodeRequests.isEmpty() : "potential circular reference"; + request.close(); + } + } } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index e8c8fcfaac969..756e4312784aa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -18,16 +18,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -37,7 +34,7 @@ public class TransportNodesReloadSecureSettingsAction extends TransportNodesAction< NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse, - TransportNodesReloadSecureSettingsAction.NodeRequest, + NodesReloadSecureSettingsRequest.NodeRequest, NodesReloadSecureSettingsResponse.NodeResponse> { private final Environment environment; @@ -59,7 +56,7 @@ public TransportNodesReloadSecureSettingsAction( transportService, actionFilters, NodesReloadSecureSettingsRequest::new, - NodeRequest::new, + NodesReloadSecureSettingsRequest.NodeRequest::new, ThreadPool.Names.GENERIC, NodesReloadSecureSettingsResponse.NodeResponse.class ); @@ -77,8 +74,8 @@ protected NodesReloadSecureSettingsResponse newResponse( } @Override - protected NodeRequest newNodeRequest(NodesReloadSecureSettingsRequest request) { - return new NodeRequest(request); + protected NodesReloadSecureSettingsRequest.NodeRequest newNodeRequest(NodesReloadSecureSettingsRequest request) { + return request.newNodeRequest(); } @Override @@ -93,7 +90,7 @@ protected void doExecute( ActionListener listener ) { if (request.hasPassword() && isNodeLocal(request) == false && isNodeTransportTLSEnabled() == false) { - request.closePassword(); + request.close(); listener.onFailure( new ElasticsearchException( "Secure settings cannot be updated cluster wide when TLS for the transport layer" @@ -101,23 +98,17 @@ protected void doExecute( ) ); } else { - super.doExecute(task, request, ActionListener.wrap(response -> { - request.closePassword(); - listener.onResponse(response); - }, e -> { - request.closePassword(); - listener.onFailure(e); - })); + super.doExecute(task, request, ActionListener.runBefore(listener, request::close)); } } @Override - protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest, Task task) { + protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation( + NodesReloadSecureSettingsRequest.NodeRequest nodeReloadRequest, + Task task + ) { final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; // We default to using an empty string as the keystore password so that we mimic pre 7.3 API behavior - final SecureString secureSettingsPassword = request.hasPassword() - ? 
request.getSecureSettingsPassword() - : new SecureString(new char[0]); try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file if (keystore == null) { @@ -127,7 +118,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque ); } // decrypt the keystore using the password from the request - keystore.decrypt(secureSettingsPassword.getChars()); + keystore.decrypt(request.hasPassword() ? request.getSecureSettingsPassword().getChars() : new char[0]); // add the keystore to the original node settings object final Settings settingsWithKeystore = Settings.builder().put(environment.settings(), false).setSecureSettings(keystore).build(); final List exceptions = new ArrayList<>(); @@ -145,27 +136,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque } catch (final Exception e) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e); } finally { - secureSettingsPassword.close(); - } - } - - public static class NodeRequest extends TransportRequest { - - NodesReloadSecureSettingsRequest request; - - public NodeRequest(StreamInput in) throws IOException { - super(in); - request = new NodesReloadSecureSettingsRequest(in); - } - - NodeRequest(NodesReloadSecureSettingsRequest request) { - this.request = request; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); + request.close(); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 8e0c1e403a7e8..86ac7088642d1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -78,7 +78,7 @@ public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XC builder.field("cluster_name", response.getClusterName().value()); response.toXContent(builder, channel.request()); builder.endObject(); - nodesRequestBuilder.request().closePassword(); + nodesRequestBuilder.request().close(); return new RestResponse(RestStatus.OK, builder); } }); From 8dfbcd5c02ca30c8a24f31296b68c312337b407e Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 12 Aug 2022 16:17:49 +1000 Subject: [PATCH 181/265] Limited-by role descriptors in Get/QueryApiKey response (#89273) An API key's effective permission is an intersection between its assigned role descriptors and a snapshot of its owner user's role descriptors (limited-by role descriptors). In #89166, the assigned role descriptors are now returned by default in Get/Query API key responses. This PR further adds support to optionally return limited-by role descriptors in the responses. Unlike assign role descriptors, an API key cannot view any limited-by role descriptors unless it has manage_api_key or higher privileges. 
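At the Java API level the new option is set through the GetApiKeyRequest builder added in this change, for example (the key id below is only a placeholder):

    GetApiKeyRequest request = GetApiKeyRequest.builder()
        .apiKeyId("my-api-key-id")   // placeholder id
        .withLimitedBy()             // also return the owner's limited-by role descriptors
        .build();
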
Relates: #89058 --- docs/changelog/89273.yaml | 5 + .../core/security/action/apikey/ApiKey.java | 65 ++- .../action/apikey/GetApiKeyRequest.java | 103 +++- .../action/apikey/QueryApiKeyRequest.java | 20 +- .../authz/RoleDescriptorsIntersection.java | 68 +++ .../ManageOwnApiKeyClusterPrivilege.java | 8 + .../test/SecuritySettingsSourceField.java | 22 +- .../security/action/apikey/ApiKeyTests.java | 39 +- .../action/apikey/GetApiKeyRequestTests.java | 16 +- .../action/apikey/GetApiKeyResponseTests.java | 102 +++- .../apikey/QueryApiKeyRequestTests.java | 13 +- .../apikey/QueryApiKeyResponseTests.java | 13 +- .../RoleDescriptorsIntersectionTests.java | 104 ++++ .../ManageOwnApiKeyClusterPrivilegeTests.java | 78 ++- .../authz/store/ReservedRolesStoreTests.java | 4 +- .../xpack/security/apikey/ApiKeyRestIT.java | 25 + .../security/authc/ApiKeyIntegTests.java | 482 ++++++++++++++---- .../apikey/TransportGetApiKeyAction.java | 5 +- .../apikey/TransportQueryApiKeyAction.java | 2 +- .../xpack/security/authc/ApiKeyService.java | 24 +- .../xpack/security/authz/RBACEngine.java | 3 +- .../action/apikey/RestGetApiKeyAction.java | 12 +- .../action/apikey/RestQueryApiKeyAction.java | 39 +- .../security/authc/ApiKeyServiceTests.java | 11 +- .../xpack/security/authz/RBACEngineTests.java | 10 +- .../apikey/RestGetApiKeyActionTests.java | 39 +- 26 files changed, 1126 insertions(+), 186 deletions(-) create mode 100644 docs/changelog/89273.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java diff --git a/docs/changelog/89273.yaml b/docs/changelog/89273.yaml new file mode 100644 index 0000000000000..e700e03c66f3f --- /dev/null +++ b/docs/changelog/89273.yaml @@ -0,0 +1,5 @@ +pr: 89273 +summary: Limited-by role descriptors in Get/QueryApiKey response +area: Security +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index 7fdf802b9976a..5b6a85c9169a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -13,17 +13,20 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; import java.io.IOException; import java.time.Instant; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -43,6 +46,8 @@ public final class ApiKey implements ToXContentObject, Writeable { private final Map metadata; @Nullable private final List roleDescriptors; + @Nullable + private final 
RoleDescriptorsIntersection limitedBy; public ApiKey( String name, @@ -53,7 +58,34 @@ public ApiKey( String username, String realm, @Nullable Map metadata, - @Nullable List roleDescriptors + @Nullable List roleDescriptors, + @Nullable List limitedByRoleDescriptors + ) { + this( + name, + id, + creation, + expiration, + invalidated, + username, + realm, + metadata, + roleDescriptors, + limitedByRoleDescriptors == null ? null : new RoleDescriptorsIntersection(List.of(Set.copyOf(limitedByRoleDescriptors))) + ); + } + + private ApiKey( + String name, + String id, + Instant creation, + Instant expiration, + boolean invalidated, + String username, + String realm, + @Nullable Map metadata, + @Nullable List roleDescriptors, + @Nullable RoleDescriptorsIntersection limitedBy ) { this.name = name; this.id = id; @@ -66,7 +98,10 @@ public ApiKey( this.username = username; this.realm = realm; this.metadata = metadata == null ? Map.of() : metadata; - this.roleDescriptors = roleDescriptors; + this.roleDescriptors = roleDescriptors != null ? List.copyOf(roleDescriptors) : null; + // This assertion will need to be changed (or removed) when derived keys are properly supported + assert limitedBy == null || limitedBy.roleDescriptorsList().size() == 1 : "can only have one set of limited-by role descriptors"; + this.limitedBy = limitedBy; } public ApiKey(StreamInput in) throws IOException { @@ -89,8 +124,10 @@ public ApiKey(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_8_5_0)) { final List roleDescriptors = in.readOptionalList(RoleDescriptor::new); this.roleDescriptors = roleDescriptors != null ? List.copyOf(roleDescriptors) : null; + this.limitedBy = in.readOptionalWriteable(RoleDescriptorsIntersection::new); } else { this.roleDescriptors = null; + this.limitedBy = null; } } @@ -130,6 +167,10 @@ public List getRoleDescriptors() { return roleDescriptors; } + public RoleDescriptorsIntersection getLimitedBy() { + return limitedBy; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -153,6 +194,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t } builder.endObject(); } + if (limitedBy != null) { + builder.field("limited_by", limitedBy); + } return builder; } @@ -174,12 +218,13 @@ public void writeTo(StreamOutput out) throws IOException { } if (out.getVersion().onOrAfter(Version.V_8_5_0)) { out.writeOptionalCollection(roleDescriptors); + out.writeOptionalWriteable(limitedBy); } } @Override public int hashCode() { - return Objects.hash(name, id, creation, expiration, invalidated, username, realm, metadata, roleDescriptors); + return Objects.hash(name, id, creation, expiration, invalidated, username, realm, metadata, roleDescriptors, limitedBy); } @Override @@ -202,7 +247,8 @@ public boolean equals(Object obj) { && Objects.equals(username, other.username) && Objects.equals(realm, other.realm) && Objects.equals(metadata, other.metadata) - && Objects.equals(roleDescriptors, other.roleDescriptors); + && Objects.equals(roleDescriptors, other.roleDescriptors) + && Objects.equals(limitedBy, other.limitedBy); } @SuppressWarnings("unchecked") @@ -216,7 +262,8 @@ public boolean equals(Object obj) { (String) args[5], (String) args[6], (args[7] == null) ? 
null : (Map) args[7], - (List) args[8] + (List) args[8], + (RoleDescriptorsIntersection) args[9] ); }); static { @@ -232,6 +279,12 @@ public boolean equals(Object obj) { p.nextToken(); return RoleDescriptor.parse(n, p, false); }, new ParseField("role_descriptors")); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> RoleDescriptorsIntersection.fromXContent(p), + new ParseField("limited_by"), + ObjectParser.ValueType.OBJECT_ARRAY + ); } public static ApiKey fromXContent(XContentParser parser) throws IOException { @@ -258,6 +311,8 @@ public String toString() { + metadata + ", role_descriptors=" + roleDescriptors + + ", limited_by=" + + limitedBy + "]"; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java index 9de4600a1b773..91dd1f937156a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java @@ -30,10 +30,7 @@ public final class GetApiKeyRequest extends ActionRequest { private final String apiKeyId; private final String apiKeyName; private final boolean ownedByAuthenticatedUser; - - public GetApiKeyRequest() { - this(null, null, null, null, false); - } + private final boolean withLimitedBy; public GetApiKeyRequest(StreamInput in) throws IOException { super(in); @@ -46,20 +43,27 @@ public GetApiKeyRequest(StreamInput in) throws IOException { } else { ownedByAuthenticatedUser = false; } + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + withLimitedBy = in.readBoolean(); + } else { + withLimitedBy = false; + } } - public GetApiKeyRequest( + private GetApiKeyRequest( @Nullable String realmName, @Nullable String userName, @Nullable String apiKeyId, @Nullable String apiKeyName, - boolean ownedByAuthenticatedUser + boolean ownedByAuthenticatedUser, + boolean withLimitedBy ) { this.realmName = textOrNull(realmName); this.userName = textOrNull(userName); this.apiKeyId = textOrNull(apiKeyId); this.apiKeyName = textOrNull(apiKeyName); this.ownedByAuthenticatedUser = ownedByAuthenticatedUser; + this.withLimitedBy = withLimitedBy; } private static String textOrNull(@Nullable String arg) { @@ -86,13 +90,18 @@ public boolean ownedByAuthenticatedUser() { return ownedByAuthenticatedUser; } + public boolean withLimitedBy() { + return withLimitedBy; + } + /** * Creates get API key request for given realm name * @param realmName realm name * @return {@link GetApiKeyRequest} */ + @Deprecated public static GetApiKeyRequest usingRealmName(String realmName) { - return new GetApiKeyRequest(realmName, null, null, null, false); + return new GetApiKeyRequest(realmName, null, null, null, false, false); } /** @@ -100,8 +109,9 @@ public static GetApiKeyRequest usingRealmName(String realmName) { * @param userName user name * @return {@link GetApiKeyRequest} */ + @Deprecated public static GetApiKeyRequest usingUserName(String userName) { - return new GetApiKeyRequest(null, userName, null, null, false); + return new GetApiKeyRequest(null, userName, null, null, false, false); } /** @@ -110,8 +120,9 @@ public static GetApiKeyRequest usingUserName(String userName) { * @param userName user name * @return {@link GetApiKeyRequest} */ + @Deprecated public static GetApiKeyRequest usingRealmAndUserName(String realmName, String userName) { - return new 
GetApiKeyRequest(realmName, userName, null, null, false); + return new GetApiKeyRequest(realmName, userName, null, null, false, false); } /** @@ -121,8 +132,9 @@ public static GetApiKeyRequest usingRealmAndUserName(String realmName, String us * {@code false} * @return {@link GetApiKeyRequest} */ + @Deprecated public static GetApiKeyRequest usingApiKeyId(String apiKeyId, boolean ownedByAuthenticatedUser) { - return new GetApiKeyRequest(null, null, apiKeyId, null, ownedByAuthenticatedUser); + return new GetApiKeyRequest(null, null, apiKeyId, null, ownedByAuthenticatedUser, false); } /** @@ -133,21 +145,23 @@ public static GetApiKeyRequest usingApiKeyId(String apiKeyId, boolean ownedByAut * @return {@link GetApiKeyRequest} */ public static GetApiKeyRequest usingApiKeyName(String apiKeyName, boolean ownedByAuthenticatedUser) { - return new GetApiKeyRequest(null, null, null, apiKeyName, ownedByAuthenticatedUser); + return new GetApiKeyRequest(null, null, null, apiKeyName, ownedByAuthenticatedUser, false); } /** * Creates get api key request to retrieve api key information for the api keys owned by the current authenticated user. */ + @Deprecated public static GetApiKeyRequest forOwnedApiKeys() { - return new GetApiKeyRequest(null, null, null, null, true); + return new GetApiKeyRequest(null, null, null, null, true, false); } /** * Creates get api key request to retrieve api key information for all api keys if the authenticated user is authorized to do so. */ + @Deprecated public static GetApiKeyRequest forAllApiKeys() { - return new GetApiKeyRequest(); + return GetApiKeyRequest.builder().build(); } @Override @@ -185,6 +199,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_7_4_0)) { out.writeOptionalBoolean(ownedByAuthenticatedUser); } + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeBoolean(withLimitedBy); + } } @Override @@ -200,11 +217,67 @@ public boolean equals(Object o) { && Objects.equals(realmName, that.realmName) && Objects.equals(userName, that.userName) && Objects.equals(apiKeyId, that.apiKeyId) - && Objects.equals(apiKeyName, that.apiKeyName); + && Objects.equals(apiKeyName, that.apiKeyName) + && withLimitedBy == that.withLimitedBy; } @Override public int hashCode() { - return Objects.hash(realmName, userName, apiKeyId, apiKeyName, ownedByAuthenticatedUser); + return Objects.hash(realmName, userName, apiKeyId, apiKeyName, ownedByAuthenticatedUser, withLimitedBy); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private String realmName = null; + private String userName = null; + private String apiKeyId = null; + private String apiKeyName = null; + private boolean ownedByAuthenticatedUser = false; + private boolean withLimitedBy = false; + + public Builder realmName(String realmName) { + this.realmName = realmName; + return this; + } + + public Builder userName(String userName) { + this.userName = userName; + return this; + } + + public Builder apiKeyId(String apiKeyId) { + this.apiKeyId = apiKeyId; + return this; + } + + public Builder apiKeyName(String apiKeyName) { + this.apiKeyName = apiKeyName; + return this; + } + + public Builder ownedByAuthenticatedUser() { + return ownedByAuthenticatedUser(true); + } + + public Builder ownedByAuthenticatedUser(boolean ownedByAuthenticatedUser) { + this.ownedByAuthenticatedUser = ownedByAuthenticatedUser; + return this; + } + + public Builder withLimitedBy() { + return withLimitedBy(true); + } + + public Builder 
withLimitedBy(boolean withLimitedBy) { + this.withLimitedBy = withLimitedBy; + return this; + } + + public GetApiKeyRequest build() { + return new GetApiKeyRequest(realmName, userName, apiKeyId, apiKeyName, ownedByAuthenticatedUser, withLimitedBy); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java index b5dec8d6d0631..949ceb41b0aea 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.action.apikey; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; @@ -33,6 +34,7 @@ public final class QueryApiKeyRequest extends ActionRequest { private final List fieldSortBuilders; @Nullable private final SearchAfterBuilder searchAfterBuilder; + private final boolean withLimitedBy; private boolean filterForCurrentUser; public QueryApiKeyRequest() { @@ -40,7 +42,7 @@ public QueryApiKeyRequest() { } public QueryApiKeyRequest(QueryBuilder queryBuilder) { - this(queryBuilder, null, null, null, null); + this(queryBuilder, null, null, null, null, false); } public QueryApiKeyRequest( @@ -48,13 +50,15 @@ public QueryApiKeyRequest( @Nullable Integer from, @Nullable Integer size, @Nullable List fieldSortBuilders, - @Nullable SearchAfterBuilder searchAfterBuilder + @Nullable SearchAfterBuilder searchAfterBuilder, + boolean withLimitedBy ) { this.queryBuilder = queryBuilder; this.from = from; this.size = size; this.fieldSortBuilders = fieldSortBuilders; this.searchAfterBuilder = searchAfterBuilder; + this.withLimitedBy = withLimitedBy; } public QueryApiKeyRequest(StreamInput in) throws IOException { @@ -68,6 +72,11 @@ public QueryApiKeyRequest(StreamInput in) throws IOException { this.fieldSortBuilders = null; } this.searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); + if (in.getVersion().onOrAfter(Version.V_8_5_0)) { + this.withLimitedBy = in.readBoolean(); + } else { + this.withLimitedBy = false; + } } public QueryBuilder getQueryBuilder() { @@ -98,6 +107,10 @@ public void setFilterForCurrentUser() { filterForCurrentUser = true; } + public boolean withLimitedBy() { + return withLimitedBy; + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -123,5 +136,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(fieldSortBuilders); } out.writeOptionalWriteable(searchAfterBuilder); + if (out.getVersion().onOrAfter(Version.V_8_5_0)) { + out.writeBoolean(withLimitedBy); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java new file mode 100644 index 0000000000000..2e27995f5cbc8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Set; + +public record RoleDescriptorsIntersection(Collection> roleDescriptorsList) implements ToXContentObject, Writeable { + + public RoleDescriptorsIntersection(StreamInput in) throws IOException { + this(List.copyOf(in.readList(inner -> inner.readSet(RoleDescriptor::new)))); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(roleDescriptorsList, StreamOutput::writeCollection); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(); + { + for (Set roleDescriptors : roleDescriptorsList) { + builder.startObject(); + for (RoleDescriptor roleDescriptor : roleDescriptors) { + builder.field(roleDescriptor.getName(), roleDescriptor); + } + builder.endObject(); + } + } + builder.endArray(); + return builder; + } + + public static RoleDescriptorsIntersection fromXContent(XContentParser xContentParser) throws IOException { + if (xContentParser.currentToken() == null) { + xContentParser.nextToken(); + } + final List> roleDescriptorsList = XContentParserUtils.parseList(xContentParser, p -> { + XContentParser.Token token = p.currentToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, p); + final List roleDescriptors = new ArrayList<>(); + while ((token = p.nextToken()) != XContentParser.Token.END_OBJECT) { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, p); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, p.nextToken(), p); + roleDescriptors.add(RoleDescriptor.parse(p.currentName(), p, false)); + } + return Set.copyOf(roleDescriptors); + }); + return new RoleDescriptorsIntersection(List.copyOf(roleDescriptorsList)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java index c80d6b8ff15c6..42975182c4106 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilege.java @@ -70,6 +70,10 @@ protected boolean extendedCheck(String action, TransportRequest request, Authent // Ownership of an API key, for regular users, is enforced at the service layer. 
return true; } else if (request instanceof final GetApiKeyRequest getApiKeyRequest) { + // An API key requires manage_api_key privilege or higher to view any limited-by role descriptors + if (authentication.isApiKey() && getApiKeyRequest.withLimitedBy()) { + return false; + } return checkIfUserIsOwnerOfApiKeys( authentication, getApiKeyRequest.getApiKeyId(), @@ -100,6 +104,10 @@ protected boolean extendedCheck(String action, TransportRequest request, Authent ); } } else if (request instanceof final QueryApiKeyRequest queryApiKeyRequest) { + // An API key requires manage_api_key privilege or higher to view any limited-by role descriptors + if (authentication.isApiKey() && queryApiKeyRequest.withLimitedBy()) { + return false; + } return queryApiKeyRequest.isFilterForCurrentUser(); } else if (request instanceof GrantApiKeyRequest) { return false; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/SecuritySettingsSourceField.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/SecuritySettingsSourceField.java index 8c8372b034b54..537aac3497cd7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/SecuritySettingsSourceField.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/SecuritySettingsSourceField.java @@ -7,6 +7,9 @@ package org.elasticsearch.test; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.util.Map; public final class SecuritySettingsSourceField { public static final SecureString TEST_PASSWORD_SECURE_STRING = new SecureString("x-pack-test-password".toCharArray()); @@ -22,9 +25,24 @@ public final class SecuritySettingsSourceField { allow_restricted_indices: true privileges: [ "ALL" ] run_as: [ "*" ] - # The _es_test_root role doesn't have any application privileges because that would require loading data (Application Privileges) - # from the security index, which can causes problems if the index is not available + applications: + - application: "*" + privileges: [ "*" ] + resources: [ "*" ] """; + public static final RoleDescriptor ES_TEST_ROOT_ROLE_DESCRIPTOR = new RoleDescriptor( + "_es_test_root", + new String[] { "ALL" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("ALL").allowRestrictedIndices(true).build() }, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder().application("*").privileges("*").resources("*").build() }, + null, + new String[] { "*" }, + Map.of(), + Map.of("enabled", true) + ); + private SecuritySettingsSourceField() {} } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java index 763bbcff4e026..d01fe7e58bdfb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java @@ -7,13 +7,16 @@ package org.elasticsearch.xpack.core.security.action.apikey; -import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.xcontent.ToXContent; import 
org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; @@ -31,6 +34,7 @@ public class ApiKeyTests extends ESTestCase { + @SuppressWarnings("unchecked") public void testXContent() throws IOException { final String name = randomAlphaOfLengthBetween(4, 10); final String id = randomAlphaOfLength(20); @@ -44,14 +48,31 @@ public void testXContent() throws IOException { final String realmName = randomAlphaOfLengthBetween(3, 8); final Map metadata = randomMetadata(); final List roleDescriptors = randomBoolean() ? null : randomUniquelyNamedRoleDescriptors(0, 3); + final List limitedByRoleDescriptors = randomUniquelyNamedRoleDescriptors(0, 3); - final ApiKey apiKey = new ApiKey(name, id, creation, expiration, invalidated, username, realmName, metadata, roleDescriptors); + final ApiKey apiKey = new ApiKey( + name, + id, + creation, + expiration, + invalidated, + username, + realmName, + metadata, + roleDescriptors, + limitedByRoleDescriptors + ); // The metadata will never be null because the constructor convert it to empty map if a null is passed in assertThat(apiKey.getMetadata(), notNullValue()); XContentBuilder builder = XContentFactory.jsonBuilder(); apiKey.toXContent(builder, ToXContent.EMPTY_PARAMS); - final Map map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + final String jsonString = Strings.toString(builder); + final Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent, jsonString, false); + + try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, jsonString)) { + assertThat(ApiKey.fromXContent(parser), equalTo(apiKey)); + } assertThat(map.get("name"), equalTo(name)); assertThat(map.get("id"), equalTo(id)); @@ -69,14 +90,22 @@ public void testXContent() throws IOException { if (roleDescriptors == null) { assertThat(map, not(hasKey("role_descriptors"))); } else { - @SuppressWarnings("unchecked") - final Map rdMap = (Map) map.get("role_descriptors"); + final var rdMap = (Map) map.get("role_descriptors"); assertThat(rdMap.size(), equalTo(roleDescriptors.size())); for (var roleDescriptor : roleDescriptors) { assertThat(rdMap, hasKey(roleDescriptor.getName())); assertThat(XContentTestUtils.convertToMap(roleDescriptor), equalTo(rdMap.get(roleDescriptor.getName()))); } } + + final var limitedByList = (List>) map.get("limited_by"); + assertThat(limitedByList.size(), equalTo(1)); + final Map limitedByMap = limitedByList.get(0); + assertThat(limitedByMap.size(), equalTo(limitedByRoleDescriptors.size())); + for (RoleDescriptor roleDescriptor : limitedByRoleDescriptors) { + assertThat(limitedByMap, hasKey(roleDescriptor.getName())); + assertThat(XContentTestUtils.convertToMap(roleDescriptor), equalTo(limitedByMap.get(roleDescriptor.getName()))); + } } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java index f904f3b24848a..989fa8093fa3e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java @@ -75,6 +75,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(apiKeyId); out.writeOptionalString(apiKeyName); out.writeOptionalBoolean(ownedByAuthenticatedUser); + out.writeBoolean(randomBoolean()); } } @@ -151,13 +152,14 @@ public void testSerialization() throws IOException { public void testEmptyStringsAreCoercedToNull() { Supplier randomBlankString = () -> " ".repeat(randomIntBetween(0, 5)); - final GetApiKeyRequest request = new GetApiKeyRequest( - randomBlankString.get(), // realm name - randomBlankString.get(), // user name - randomBlankString.get(), // key id - randomBlankString.get(), // key name - randomBoolean() // owned by user - ); + final GetApiKeyRequest request = GetApiKeyRequest.builder() + .realmName(randomBlankString.get()) + .userName(randomBlankString.get()) + .apiKeyId(randomBlankString.get()) + .apiKeyName(randomBlankString.get()) + .ownedByAuthenticatedUser(randomBoolean()) + .withLimitedBy(randomBoolean()) + .build(); assertThat(request.getRealmName(), nullValue()); assertThat(request.getUserName(), nullValue()); assertThat(request.getApiKeyId(), nullValue()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java index cbc707f335e50..eaedc461137bc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java @@ -45,7 +45,8 @@ public void testSerialization() throws IOException { randomAlphaOfLength(4), randomAlphaOfLength(5), randomBoolean() ? null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)), - randomBoolean() ? null : randomUniquelyNamedRoleDescriptors(0, 3) + randomBoolean() ? 
null : randomUniquelyNamedRoleDescriptors(0, 3), + randomUniquelyNamedRoleDescriptors(1, 3) ); GetApiKeyResponse response = new GetApiKeyResponse(Collections.singletonList(apiKeyInfo)); @@ -83,6 +84,15 @@ public void testToXContent() throws IOException { new String[] { "foo" } ) ); + final List limitedByRoleDescriptors = List.of( + new RoleDescriptor( + "rd_0", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("index").privileges("all").build() }, + new String[] { "*" } + ) + ); ApiKey apiKeyInfo1 = createApiKeyInfo( "name1", @@ -93,7 +103,8 @@ public void testToXContent() throws IOException { "user-a", "realm-x", null, - null + null, + List.of() // empty limited-by role descriptor to simulate derived keys ); ApiKey apiKeyInfo2 = createApiKeyInfo( "name2", @@ -104,7 +115,8 @@ public void testToXContent() throws IOException { "user-b", "realm-y", Map.of(), - List.of() + List.of(), + limitedByRoleDescriptors ); ApiKey apiKeyInfo3 = createApiKeyInfo( null, @@ -115,7 +127,8 @@ public void testToXContent() throws IOException { "user-c", "realm-z", Map.of("foo", "bar"), - roleDescriptors + roleDescriptors, + limitedByRoleDescriptors ); GetApiKeyResponse response = new GetApiKeyResponse(Arrays.asList(apiKeyInfo1, apiKeyInfo2, apiKeyInfo3)); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -131,7 +144,10 @@ public void testToXContent() throws IOException { "invalidated": false, "username": "user-a", "realm": "realm-x", - "metadata": {} + "metadata": {}, + "limited_by": [ + { } + ] }, { "id": "id-2", @@ -142,7 +158,35 @@ public void testToXContent() throws IOException { "username": "user-b", "realm": "realm-y", "metadata": {}, - "role_descriptors": {} + "role_descriptors": {}, + "limited_by": [ + { + "rd_0": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": false + } + ], + "applications": [], + "run_as": [ + "*" + ], + "metadata": {}, + "transient_metadata": { + "enabled": true + } + } + } + ] }, { "id": "id-3", @@ -179,7 +223,35 @@ public void testToXContent() throws IOException { "enabled": true } } - } + }, + "limited_by": [ + { + "rd_0": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": false + } + ], + "applications": [], + "run_as": [ + "*" + ], + "metadata": {}, + "transient_metadata": { + "enabled": true + } + } + } + ] } ] }"""))); @@ -194,8 +266,20 @@ private ApiKey createApiKeyInfo( String username, String realm, Map metadata, - List roleDescriptors + List roleDescriptors, + List limitedByRoleDescriptors ) { - return new ApiKey(name, id, creation, expiration, invalidated, username, realm, metadata, roleDescriptors); + return new ApiKey( + name, + id, + creation, + expiration, + invalidated, + username, + realm, + metadata, + roleDescriptors, + limitedByRoleDescriptors + ); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequestTests.java index 1d89166c3570c..6c7df3d4db80c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequestTests.java @@ -72,7 +72,8 @@ public void 
testReadWrite() throws IOException { new FieldSortBuilder("creation_time").setFormat("strict_date_time").order(SortOrder.DESC), new FieldSortBuilder("username") ), - new SearchAfterBuilder().setSortValues(new String[] { "key-2048", "2021-07-01T00:00:59.000Z" }) + new SearchAfterBuilder().setSortValues(new String[] { "key-2048", "2021-07-01T00:00:59.000Z" }), + randomBoolean() ); try (BytesStreamOutput out = new BytesStreamOutput()) { request3.writeTo(out); @@ -83,6 +84,7 @@ public void testReadWrite() throws IOException { assertThat(deserialized.getSize(), equalTo(request3.getSize())); assertThat(deserialized.getFieldSortBuilders(), equalTo(request3.getFieldSortBuilders())); assertThat(deserialized.getSearchAfterBuilder(), equalTo(request3.getSearchAfterBuilder())); + assertThat(deserialized.withLimitedBy(), equalTo(request3.withLimitedBy())); } } } @@ -93,7 +95,8 @@ public void testValidate() { randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), null, - null + null, + randomBoolean() ); assertThat(request1.validate(), nullValue()); @@ -102,7 +105,8 @@ public void testValidate() { randomIntBetween(Integer.MIN_VALUE, -1), randomIntBetween(0, Integer.MAX_VALUE), null, - null + null, + randomBoolean() ); assertThat(request2.validate().getMessage(), containsString("[from] parameter cannot be negative")); @@ -111,7 +115,8 @@ public void testValidate() { randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(Integer.MIN_VALUE, -1), null, - null + null, + randomBoolean() ); assertThat(request3.validate().getMessage(), containsString("[size] parameter cannot be negative")); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java index 2827dbc3de4e3..6fd3c2594ab95 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java @@ -95,7 +95,18 @@ private ApiKey randomApiKeyInfo() { final Instant expiration = randomBoolean() ? Instant.ofEpochMilli(randomMillisUpToYear9999()) : null; final Map metadata = ApiKeyTests.randomMetadata(); final List roleDescriptors = randomFrom(randomUniquelyNamedRoleDescriptors(0, 3), null); - return new ApiKey(name, id, creation, expiration, false, username, realm_name, metadata, roleDescriptors); + return new ApiKey( + name, + id, + creation, + expiration, + false, + username, + realm_name, + metadata, + roleDescriptors, + randomUniquelyNamedRoleDescriptors(1, 3) + ); } private Object[] randomSortValues() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java new file mode 100644 index 0000000000000..6f8691fbb317a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.authz; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; +import static org.hamcrest.Matchers.equalTo; + +public class RoleDescriptorsIntersectionTests extends ESTestCase { + + public void testSerialization() throws IOException { + final RoleDescriptorsIntersection roleDescriptorsIntersection = new RoleDescriptorsIntersection( + randomList(0, 3, () -> Set.copyOf(randomUniquelyNamedRoleDescriptors(0, 3))) + ); + + final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageApplicationPrivileges.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageApplicationPrivileges::createFrom + ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, + ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom + ) + ) + ); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + roleDescriptorsIntersection.writeTo(output); + try (StreamInput input = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + RoleDescriptorsIntersection deserialized = new RoleDescriptorsIntersection(input); + assertThat(deserialized.roleDescriptorsList(), equalTo(roleDescriptorsIntersection.roleDescriptorsList())); + } + } + } + + public void testXContent() throws IOException { + final RoleDescriptorsIntersection roleDescriptorsIntersection = new RoleDescriptorsIntersection( + List.of( + Set.of(new RoleDescriptor("role_0", new String[] { "monitor" }, null, null)), + Set.of(new RoleDescriptor("role_1", new String[] { "all" }, null, null)) + ) + ); + + final XContentBuilder builder = XContentFactory.jsonBuilder(); + roleDescriptorsIntersection.toXContent(builder, ToXContent.EMPTY_PARAMS); + final String jsonString = Strings.toString(builder); + + assertThat(jsonString, equalTo(XContentHelper.stripWhitespace(""" + [ + { + "role_0": { + "cluster": ["monitor"], + "indices": [], + "applications": [], + "run_as": [], + "metadata": {}, + "transient_metadata": {"enabled": true} + } + }, + { + "role_1": { + "cluster": ["all"], + "indices": [], + "applications": [], + "run_as": [], + "metadata": {}, + "transient_metadata": {"enabled": true} + } + } + ]"""))); + + try (XContentParser p = 
JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, jsonString)) { + assertThat(RoleDescriptorsIntersection.fromXContent(p), equalTo(roleDescriptorsIntersection)); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java index 32f3cee5fb644..6cd18e7935d84 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java @@ -35,20 +35,38 @@ public class ManageOwnApiKeyClusterPrivilegeTests extends ESTestCase { - public void testAuthenticationWithApiKeyAllowsAccessToApiKeyActionsWhenItIsOwner() { + public void testAuthenticationWithApiKeyAllowsAccessToApiKeyActionsWhenItIsItself() { final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) .build(); final String apiKeyId = randomAlphaOfLengthBetween(4, 7); final User userJoe = new User("joe"); final Authentication authentication = AuthenticationTests.randomApiKeyAuthentication(userJoe, apiKeyId); - final TransportRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); + final TransportRequest getApiKeyRequest = GetApiKeyRequest.builder() + .apiKeyId(apiKeyId) + .ownedByAuthenticatedUser(randomBoolean()) + .build(); final TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); assertFalse(clusterPermission.check("cluster:admin/something", mock(TransportRequest.class), authentication)); } + public void testAuthenticationWithApiKeyAllowsDeniesGetApiKeyWithLimitedByWhenItIsItself() { + final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) + .build(); + final String apiKeyId = randomAlphaOfLengthBetween(4, 7); + final User userJoe = new User("joe"); + final Authentication authentication = AuthenticationTests.randomApiKeyAuthentication(userJoe, apiKeyId); + assertFalse( + clusterPermission.check( + "cluster:admin/xpack/security/api_key/get", + GetApiKeyRequest.builder().apiKeyId(apiKeyId).withLimitedBy().build(), + authentication + ) + ); + } + public void testAuthenticationForUpdateApiKeyAllowsAll() { final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) .build(); @@ -76,7 +94,11 @@ public void testAuthenticationWithApiKeyDeniesAccessToApiKeyActionsWhenItIsNotOw final String apiKeyId = randomAlphaOfLengthBetween(4, 7); final User userJoe = new User("joe"); final Authentication authentication = AuthenticationTests.randomApiKeyAuthentication(userJoe, randomAlphaOfLength(20)); - final TransportRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); + final TransportRequest getApiKeyRequest = GetApiKeyRequest.builder() + .apiKeyId(apiKeyId) + .ownedByAuthenticatedUser(randomBoolean()) + .withLimitedBy(randomBoolean()) + .build(); 
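// Note: the key id used in the request above differs from the id of the authenticated API key
// (created earlier with randomAlphaOfLength(20)), so the owner-scoped checks that follow are expected to be denied.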
final TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()); assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); @@ -90,7 +112,11 @@ public void testAuthenticationWithUserAllowsAccessToApiKeyActionsWhenItIsOwner() final Authentication.RealmRef realmRef = AuthenticationTests.randomRealmRef(randomBoolean()); final Authentication authentication = AuthenticationTests.randomAuthentication(new User("joe"), realmRef); - TransportRequest getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName(realmRef.getName(), "joe"); + TransportRequest getApiKeyRequest = GetApiKeyRequest.builder() + .realmName(realmRef.getName()) + .userName("joe") + .withLimitedBy(randomBoolean()) + .build(); TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.usingRealmAndUserName(realmRef.getName(), "joe"); TransportRequest updateApiKeyRequest = UpdateApiKeyRequest.usingApiKeyId(randomAlphaOfLength(10)); assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); @@ -99,7 +125,7 @@ public void testAuthenticationWithUserAllowsAccessToApiKeyActionsWhenItIsOwner() assertFalse(clusterPermission.check("cluster:admin/something", mock(TransportRequest.class), authentication)); - getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName(realmRef.getName(), "jane"); + getApiKeyRequest = GetApiKeyRequest.builder().realmName(realmRef.getName()).userName("jane").withLimitedBy(randomBoolean()).build(); assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); invalidateApiKeyRequest = InvalidateApiKeyRequest.usingRealmAndUserName(realmRef.getName(), "jane"); assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); @@ -108,7 +134,11 @@ public void testAuthenticationWithUserAllowsAccessToApiKeyActionsWhenItIsOwner() final String otherRealmName; if (realmDomain != null) { for (RealmConfig.RealmIdentifier realmIdentifier : realmDomain.realms()) { - getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName(realmIdentifier.getName(), "joe"); + getApiKeyRequest = GetApiKeyRequest.builder() + .realmName(realmIdentifier.getName()) + .userName("joe") + .withLimitedBy(randomBoolean()) + .build(); assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); invalidateApiKeyRequest = InvalidateApiKeyRequest.usingRealmAndUserName(realmIdentifier.getName(), "joe"); assertTrue( @@ -122,7 +152,7 @@ public void testAuthenticationWithUserAllowsAccessToApiKeyActionsWhenItIsOwner() } else { otherRealmName = randomValueOtherThan(realmRef.getName(), () -> randomAlphaOfLengthBetween(2, 10)); } - getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName(otherRealmName, "joe"); + getApiKeyRequest = GetApiKeyRequest.builder().realmName(otherRealmName).userName("joe").withLimitedBy(randomBoolean()).build(); assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); invalidateApiKeyRequest = InvalidateApiKeyRequest.usingRealmAndUserName(otherRealmName, "joe"); assertFalse(clusterPermission.check("cluster:admin/xpack/security/api_key/invalidate", invalidateApiKeyRequest, authentication)); @@ -148,7 +178,10 @@ public void testAuthenticationWithUserAllowsAccessToApiKeyActionsWhenItIsOwner_W authentication = 
AuthenticationTests.randomAuthentication(userJoe, realmRef); } - final TransportRequest getApiKeyRequest = GetApiKeyRequest.forOwnedApiKeys(); + final TransportRequest getApiKeyRequest = GetApiKeyRequest.builder() + .ownedByAuthenticatedUser() + .withLimitedBy(randomBoolean()) + .build(); final TransportRequest invalidateApiKeyRequest = InvalidateApiKeyRequest.forOwnedApiKeys(); assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/get", getApiKeyRequest, authentication)); @@ -177,9 +210,9 @@ public void testAuthenticationWithUserDeniesAccessToApiKeyActionsWhenItIsNotOwne } final TransportRequest getApiKeyRequest = randomFrom( - GetApiKeyRequest.usingRealmAndUserName("realm1", randomAlphaOfLength(7)), - GetApiKeyRequest.usingRealmAndUserName(randomAlphaOfLength(5), "joe"), - new GetApiKeyRequest(randomAlphaOfLength(5), randomAlphaOfLength(7), null, null, false) + GetApiKeyRequest.builder().realmName("realm1").userName(randomAlphaOfLength(7)).withLimitedBy(randomBoolean()).build(), + GetApiKeyRequest.builder().realmName(randomAlphaOfLength(5)).userName("joe").withLimitedBy(randomBoolean()).build(), + GetApiKeyRequest.builder().realmName(randomAlphaOfLength(5)).userName(randomAlphaOfLength(7)).build() ); final TransportRequest invalidateApiKeyRequest = randomFrom( InvalidateApiKeyRequest.usingRealmAndUserName("realm1", randomAlphaOfLength(7)), @@ -203,7 +236,7 @@ public void testGetAndInvalidateApiKeyWillRespectRunAsUser() { assertTrue( clusterPermission.check( "cluster:admin/xpack/security/api_key/get", - GetApiKeyRequest.usingRealmAndUserName("realm_b", "user_b"), + GetApiKeyRequest.builder().realmName("realm_b").userName("user_b").withLimitedBy(randomBoolean()).build(), authentication ) ); @@ -220,16 +253,33 @@ public void testCheckQueryApiKeyRequest() { final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) .build(); - final QueryApiKeyRequest queryApiKeyRequest = new QueryApiKeyRequest(); + final QueryApiKeyRequest queryApiKeyRequest = new QueryApiKeyRequest(null, null, null, null, null, randomBoolean()); if (randomBoolean()) { queryApiKeyRequest.setFilterForCurrentUser(); } assertThat( - clusterPermission.check(QueryApiKeyAction.NAME, queryApiKeyRequest, AuthenticationTestHelper.builder().build()), + clusterPermission.check( + QueryApiKeyAction.NAME, + queryApiKeyRequest, + randomValueOtherThanMany(Authentication::isApiKey, () -> AuthenticationTestHelper.builder().build()) + ), is(queryApiKeyRequest.isFilterForCurrentUser()) ); } + public void testAuthenticationWithApiKeyAllowsDeniesQueryApiKeyWithLimitedBy() { + final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) + .build(); + + final boolean withLimitedBy = randomBoolean(); + final QueryApiKeyRequest queryApiKeyRequest = new QueryApiKeyRequest(null, null, null, null, null, withLimitedBy); + queryApiKeyRequest.setFilterForCurrentUser(); + assertThat( + clusterPermission.check(QueryApiKeyAction.NAME, queryApiKeyRequest, AuthenticationTestHelper.builder().apiKey().build(false)), + is(false == queryApiKeyRequest.withLimitedBy()) + ); + } + public void testCheckGrantApiKeyRequestDenied() { final ClusterPermission clusterPermission = ManageOwnApiKeyClusterPrivilege.INSTANCE.buildPermission(ClusterPermission.builder()) .build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index a61aafeb89ccd..4a717c56d4510 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -416,9 +416,9 @@ public void testKibanaSystemRole() { final CreateApiKeyRequest createApiKeyRequest = new CreateApiKeyRequest(randomAlphaOfLength(8), null, null); assertThat(kibanaRole.cluster().check(CreateApiKeyAction.NAME, createApiKeyRequest, authentication), is(true)); // Can only get and query its own API keys - assertThat(kibanaRole.cluster().check(GetApiKeyAction.NAME, new GetApiKeyRequest(), authentication), is(false)); + assertThat(kibanaRole.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.forAllApiKeys(), authentication), is(false)); assertThat( - kibanaRole.cluster().check(GetApiKeyAction.NAME, new GetApiKeyRequest(null, null, null, null, true), authentication), + kibanaRole.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.builder().ownedByAuthenticatedUser().build(), authentication), is(true) ); final QueryApiKeyRequest queryApiKeyRequest = new QueryApiKeyRequest(); diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index d047e1b4ff6d6..ebc35a9b5a11d 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -32,6 +32,8 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.test.SecuritySettingsSourceField.ES_TEST_ROOT_ROLE; +import static org.elasticsearch.test.SecuritySettingsSourceField.ES_TEST_ROOT_ROLE_DESCRIPTOR; import static org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField.RUN_AS_USER_HEADER; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; @@ -144,14 +146,25 @@ public void testGetApiKeyRoleDescriptors() throws IOException { assertOK(adminClient().performRequest(createApiKeyRequest3)); // Role descriptors are returned by both get and query api key calls + final boolean withLimitedBy = randomBoolean(); final List> apiKeyMaps; if (randomBoolean()) { final Request getApiKeyRequest = new Request("GET", "_security/api_key"); + if (withLimitedBy) { + getApiKeyRequest.addParameter("with_limited_by", "true"); + } else if (randomBoolean()) { + getApiKeyRequest.addParameter("with_limited_by", "false"); + } final Response getApiKeyResponse = adminClient().performRequest(getApiKeyRequest); assertOK(getApiKeyResponse); apiKeyMaps = (List>) responseAsMap(getApiKeyResponse).get("api_keys"); } else { final Request queryApiKeyRequest = new Request("POST", "_security/_query/api_key"); + if (withLimitedBy) { + queryApiKeyRequest.addParameter("with_limited_by", "true"); + } else if (randomBoolean()) { + queryApiKeyRequest.addParameter("with_limited_by", "false"); + } final Response queryApiKeyResponse = adminClient().performRequest(queryApiKeyRequest); assertOK(queryApiKeyResponse); apiKeyMaps = (List>) responseAsMap(queryApiKeyResponse).get("api_keys"); @@ -162,6 +175,18 @@ public void 
testGetApiKeyRoleDescriptors() throws IOException { final String name = (String) apiKeyMap.get("name"); @SuppressWarnings("unchecked") final var roleDescriptors = (Map) apiKeyMap.get("role_descriptors"); + + if (withLimitedBy) { + final List> limitedBy = (List>) apiKeyMap.get("limited_by"); + assertThat(limitedBy.size(), equalTo(1)); + assertThat( + limitedBy.get(0), + equalTo(Map.of(ES_TEST_ROOT_ROLE, XContentTestUtils.convertToMap(ES_TEST_ROOT_ROLE_DESCRIPTOR))) + ); + } else { + assertThat(apiKeyMap, not(hasKey("limited_by"))); + } + switch (name) { case "k1" -> { assertThat(roleDescriptors, anEmptyMap()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index a593616b9ba9a..ff20c52923402 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.SecurityIntegTestCase; @@ -65,6 +66,9 @@ import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyResponse; @@ -85,6 +89,7 @@ import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.user.User; @@ -112,6 +117,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -121,6 +127,7 @@ import static org.elasticsearch.test.SecuritySettingsSource.HASHER; import static org.elasticsearch.test.SecuritySettingsSource.TEST_ROLE; import static org.elasticsearch.test.SecuritySettingsSource.TEST_USER_NAME; +import static org.elasticsearch.test.SecuritySettingsSourceField.ES_TEST_ROOT_ROLE_DESCRIPTOR; import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; import static org.elasticsearch.test.TestMatchers.throwableWithMessage; import 
static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -134,6 +141,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasKey; @@ -643,15 +651,21 @@ public void testActiveApiKeysWithNoExpirationNeverGetDeletedByRemover() throws E assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); assertThat(invalidateResponse.getErrors().size(), equalTo(0)); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().realmName("file").withLimitedBy(withLimitedBy).build(), + getApiKeyResponseListener + ); GetApiKeyResponse response = getApiKeyResponseListener.get(); - verifyGetResponse( + verifyApiKeyInfos( 2, responses, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + response.getApiKeyInfos(), Collections.singleton(responses.get(0).getId()), Collections.singletonList(responses.get(1).getId()) ); @@ -685,15 +699,21 @@ public void testGetApiKeysForRealm() throws InterruptedException, ExecutionExcep expectedValidKeyIds = responses.stream().map(o -> o.getId()).collect(Collectors.toSet()); } + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), listener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().realmName("file").withLimitedBy(withLimitedBy).build(), + listener + ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( noOfApiKeys, responses, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + response.getApiKeyInfos(), expectedValidKeyIds, invalidatedApiKeyIds ); @@ -706,15 +726,21 @@ public void testGetApiKeysForUser() throws Exception { Client client = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) ); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingUserName(ES_TEST_ROOT_USER), listener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().userName(ES_TEST_ROOT_USER).withLimitedBy(withLimitedBy).build(), + listener + ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( noOfApiKeys, responses, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + withLimitedBy ? 
List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + response.getApiKeyInfos(), responses.stream().map(o -> o.getId()).collect(Collectors.toSet()), null ); @@ -726,15 +752,21 @@ public void testGetApiKeysForRealmAndUser() throws InterruptedException, Executi Client client = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) ); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmAndUserName("file", ES_TEST_ROOT_USER), listener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().realmName("file").userName(ES_TEST_ROOT_USER).withLimitedBy(withLimitedBy).build(), + listener + ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( 1, responses, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + response.getApiKeyInfos(), Collections.singleton(responses.get(0).getId()), null ); @@ -746,15 +778,21 @@ public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionEx Client client = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) ); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false), listener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyId(responses.get(0).getId()).withLimitedBy(withLimitedBy).build(), + listener + ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( 1, responses, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + response.getApiKeyInfos(), Collections.singleton(responses.get(0).getId()), null ); @@ -779,30 +817,41 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution final List createApiKeyResponses2 = tuple2.v1(); Client client = client().filterWithHeader(headers); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); @SuppressWarnings("unchecked") List responses = randomFrom(createApiKeyResponses1, createApiKeyResponses2); List> metadatas = responses == createApiKeyResponses1 ? tuple1.v2() : tuple2.v2(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName(responses.get(0).getName(), false), listener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyName(responses.get(0).getName()).withLimitedBy(withLimitedBy).build(), + listener + ); // role descriptors are the same between randomization - verifyGetResponse( + verifyApiKeyInfos( 1, responses, metadatas, List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - listener.get(), + withLimitedBy ? 
List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + listener.get().getApiKeyInfos(), Collections.singleton(responses.get(0).getId()), null ); PlainActionFuture listener2 = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName("test-key*", false), listener2); - verifyGetResponse( + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyName("test-key*").withLimitedBy(withLimitedBy).build(), + listener2 + ); + verifyApiKeyInfos( noOfApiKeys, createApiKeyResponses1, tuple1.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - listener2.get(), + withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + listener2.get().getApiKeyInfos(), createApiKeyResponses1.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()), null ); @@ -812,31 +861,54 @@ public void testGetApiKeysForApiKeyName() throws InterruptedException, Execution ); PlainActionFuture listener3 = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName("*", false), listener3); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyName("*").withLimitedBy(withLimitedBy).build(), + listener3 + ); responses = Stream.concat(createApiKeyResponses1.stream(), createApiKeyResponses2.stream()).collect(Collectors.toList()); metadatas = Stream.concat(tuple1.v2().stream(), tuple2.v2().stream()).collect(Collectors.toList()); - verifyGetResponse( + verifyApiKeyInfos( 2 * noOfApiKeys, responses, metadatas, List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - listener3.get(), + withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + listener3.get().getApiKeyInfos(), responses.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()), null ); PlainActionFuture listener4 = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName("does-not-exist*", false), listener4); - verifyGetResponse(0, Collections.emptyList(), null, List.of(), listener4.get(), Collections.emptySet(), null); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyName("does-not-exist*").withLimitedBy(withLimitedBy).build(), + listener4 + ); + verifyApiKeyInfos( + 0, + Collections.emptyList(), + null, + List.of(), + List.of(), + listener4.get().getApiKeyInfos(), + Collections.emptySet(), + null + ); PlainActionFuture listener5 = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName("another-test-key*", false), listener5); - verifyGetResponse( + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyName("another-test-key*").withLimitedBy(withLimitedBy).build(), + listener5 + ); + verifyApiKeyInfos( noOfApiKeys, createApiKeyResponses2, tuple2.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - listener5.get(), + withLimitedBy ? 
List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null, + listener5.get().getApiKeyInfos(), createApiKeyResponses2.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()), null ); @@ -858,16 +930,37 @@ public void testGetApiKeysOwnedByCurrentAuthenticatedUser() throws InterruptedEx Collections.singletonMap("Authorization", basicAuthHeaderValue(userWithManageApiKeyRole, TEST_PASSWORD_SECURE_STRING)) ); + final boolean withLimitedBy = randomBoolean(); + final List expectedLimitedByRoleDescriptors; + if (withLimitedBy) { + if (userWithManageApiKeyRole.equals("user_with_manage_api_key_role")) { + expectedLimitedByRoleDescriptors = List.of( + new RoleDescriptor("manage_api_key_role", new String[] { "manage_api_key" }, null, null) + ); + } else { + expectedLimitedByRoleDescriptors = List.of( + new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null) + ); + } + } else { + expectedLimitedByRoleDescriptors = null; + } + PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forOwnedApiKeys(), listener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().ownedByAuthenticatedUser().withLimitedBy(withLimitedBy).build(), + listener + ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( userWithManageApiKeyRole, noOfApiKeysForUserWithManageApiKeyRole, userWithManageApiKeyRoleApiKeys, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + expectedLimitedByRoleDescriptors, + response.getApiKeyInfos(), userWithManageApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null ); @@ -886,16 +979,24 @@ public void testGetApiKeysOwnedByRunAsUserWhenOwnerIsTrue() throws ExecutionExce "monitor" ); List userWithManageOwnApiKeyRoleApiKeys = tuple.v1(); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); - getClientForRunAsUser().execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forOwnedApiKeys(), listener); + getClientForRunAsUser().execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().ownedByAuthenticatedUser().withLimitedBy(withLimitedBy).build(), + listener + ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( "user_with_manage_own_api_key_role", noOfApiKeysForUserWithManageApiKeyRole, userWithManageOwnApiKeyRoleApiKeys, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + withLimitedBy + ? 
List.of(new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null)) + : null, + response.getApiKeyInfos(), userWithManageOwnApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null ); @@ -914,20 +1015,24 @@ public void testGetApiKeysOwnedByRunAsUserWhenRunAsUserInfoIsGiven() throws Exec "monitor" ); List userWithManageOwnApiKeyRoleApiKeys = tuple.v1(); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); getClientForRunAsUser().execute( GetApiKeyAction.INSTANCE, - GetApiKeyRequest.usingRealmAndUserName("file", "user_with_manage_own_api_key_role"), + GetApiKeyRequest.builder().realmName("file").userName("user_with_manage_own_api_key_role").withLimitedBy(withLimitedBy).build(), listener ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( "user_with_manage_own_api_key_role", noOfApiKeysForUserWithManageApiKeyRole, userWithManageOwnApiKeyRoleApiKeys, tuple.v2(), List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, + withLimitedBy + ? List.of(new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null)) + : null, + response.getApiKeyInfos(), userWithManageOwnApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null ); @@ -988,8 +1093,9 @@ public void testGetAllApiKeys() throws InterruptedException, ExecutionException final Client client = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue("user_with_manage_api_key_role", TEST_PASSWORD_SECURE_STRING)) ); + final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, new GetApiKeyRequest(), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().withLimitedBy(withLimitedBy).build(), listener); GetApiKeyResponse response = listener.get(); int totalApiKeys = noOfSuperuserApiKeys + noOfApiKeysForUserWithManageApiKeyRole + noOfApiKeysForUserWithManageOwnApiKeyRole; List allApiKeys = new ArrayList<>(); @@ -997,14 +1103,32 @@ public void testGetAllApiKeys() throws InterruptedException, ExecutionException final List> metadatas = Stream.of(defaultUserTuple.v2(), userWithManageTuple.v2(), userWithManageOwnTuple.v2()) .flatMap(List::stream) .collect(Collectors.toList()); - verifyGetResponse( + + final Function> expectedLimitedByRoleDescriptorsLookup = username -> { + if (withLimitedBy) { + return switch (username) { + case ES_TEST_ROOT_USER -> List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR); + case "user_with_manage_api_key_role" -> List.of( + new RoleDescriptor("manage_api_key_role", new String[] { "manage_api_key" }, null, null) + ); + case "user_with_manage_own_api_key_role" -> List.of( + new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null) + ); + default -> throw new IllegalStateException("unknown username: " + username); + }; + } else { + return null; + } + }; + verifyApiKeyInfos( new String[] { ES_TEST_ROOT_USER, "user_with_manage_api_key_role", "user_with_manage_own_api_key_role" }, totalApiKeys, allApiKeys, metadatas, List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR), - response, - allApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), + expectedLimitedByRoleDescriptorsLookup, + response.getApiKeyInfos(), + allApiKeys.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()), null ); } @@ -1032,7 +1156,7 @@ public void 
testGetAllApiKeysFailsForUserWithNoRoleOrRetrieveOwnApiKeyRole() thr Collections.singletonMap("Authorization", basicAuthHeaderValue(withUser, TEST_PASSWORD_SECURE_STRING)) ); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, new GetApiKeyRequest(), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forAllApiKeys(), listener); ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> listener.actionGet()); assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", withUser); } @@ -1143,20 +1267,35 @@ public void testApiKeyAuthorizationApiKeyMustBeAbleToRetrieveItsOwnInformationBu .encodeToString((responses.get(0).getId() + ":" + responses.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8)); Client client = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue)); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(0).getId(), randomBoolean()), listener); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyId(responses.get(0).getId()).ownedByAuthenticatedUser(randomBoolean()).build(), + listener + ); GetApiKeyResponse response = listener.get(); - verifyGetResponse( + verifyApiKeyInfos( 1, responses, tuple.v2(), List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), Strings.EMPTY_ARRAY, null, null)), - response, + null, + response.getApiKeyInfos(), Collections.singleton(responses.get(0).getId()), null ); - final PlainActionFuture failureListener = new PlainActionFuture<>(); + // It cannot retrieve its own limited-by role descriptors + final PlainActionFuture future2 = new PlainActionFuture<>(); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyId(responses.get(0).getId()).ownedByAuthenticatedUser(randomBoolean()).withLimitedBy().build(), + future2 + ); + final ElasticsearchSecurityException e2 = expectThrows(ElasticsearchSecurityException.class, future2::actionGet); + assertErrorMessage(e2, "cluster:admin/xpack/security/api_key/get", ES_TEST_ROOT_USER, responses.get(0).getId()); + // for any other API key id, it must deny access + final PlainActionFuture failureListener = new PlainActionFuture<>(); client.execute( GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(responses.get(1).getId(), randomBoolean()), @@ -1171,6 +1310,117 @@ public void testApiKeyAuthorizationApiKeyMustBeAbleToRetrieveItsOwnInformationBu assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", ES_TEST_ROOT_USER, responses.get(0).getId()); } + public void testApiKeyViewLimitedBy() { + // 1. 
An API key with manage_own_api_key + final Tuple, List>> tuple1 = createApiKeys( + ES_TEST_ROOT_USER, + 1, + null, + "manage_own_api_key" + ); + final List responses1 = tuple1.v1(); + final String apiKeyId1 = responses1.get(0).getId(); + final Client client1 = client().filterWithHeader( + Map.of( + "Authorization", + "ApiKey " + + Base64.getEncoder() + .encodeToString((apiKeyId1 + ":" + responses1.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8)) + ) + ); + + // Can view itself without limited-by + verifyApiKeyInfos( + 1, + responses1, + tuple1.v2(), + List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), new String[] { "manage_own_api_key" }, null, null)), + null, + new ApiKey[] { getApiKeyInfo(client1, apiKeyId1, false, randomBoolean()) }, + Collections.singleton(apiKeyId1), + null + ); + + // Cannot view itself with limited-by + final boolean useGetApiKey = randomBoolean(); + final var e2 = expectThrows(ElasticsearchSecurityException.class, () -> getApiKeyInfo(client1, apiKeyId1, true, useGetApiKey)); + assertErrorMessage(e2, "cluster:admin/xpack/security/api_key/" + (useGetApiKey ? "get" : "query"), ES_TEST_ROOT_USER, apiKeyId1); + + // 2. An API key with manage_api_key can view its own limited-by or any other key's limited-by + final Tuple, List>> tuple3 = createApiKeys( + ES_TEST_ROOT_USER, + 1, + null, + "manage_api_key" + ); + final List responses3 = tuple3.v1(); + final String apiKeyId3 = responses3.get(0).getId(); + final Client client3 = client().filterWithHeader( + Map.of( + "Authorization", + "ApiKey " + + Base64.getEncoder() + .encodeToString((apiKeyId3 + ":" + responses3.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8)) + ) + ); + + // View its own limited-by + verifyApiKeyInfos( + 1, + responses3, + tuple3.v2(), + List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), new String[] { "manage_api_key" }, null, null)), + List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR), + new ApiKey[] { getApiKeyInfo(client3, apiKeyId3, true, randomBoolean()) }, + Collections.singleton(apiKeyId3), + null + ); + + // View other key's limited-by + verifyApiKeyInfos( + 1, + responses1, + tuple1.v2(), + List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), new String[] { "manage_own_api_key" }, null, null)), + List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR), + new ApiKey[] { getApiKeyInfo(client3, apiKeyId1, true, randomBoolean()) }, + Collections.singleton(apiKeyId1), + null + ); + } + + public void testLegacySuperuserLimitedByWillBeReturnedAsTransformed() throws Exception { + final Tuple> createdApiKey = createApiKey(TEST_USER_NAME, null); + final var apiKeyId = createdApiKey.v1().getId(); + final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName(); + final Authentication authentication = Authentication.newRealmAuthentication( + new User(TEST_USER_NAME, TEST_ROLE), + new Authentication.RealmRef("file", "file", serviceWithNodeName.nodeName()) + ); + // Force set user role descriptors to 7.x legacy superuser role descriptors + assertSingleUpdate( + apiKeyId, + updateApiKeys( + serviceWithNodeName.service(), + authentication, + BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId), + Set.of(ApiKeyService.LEGACY_SUPERUSER_ROLE_DESCRIPTOR) + ) + ); + // raw document has the legacy superuser role descriptor + expectRoleDescriptorsForApiKey( + "limited_by_role_descriptors", + Set.of(ApiKeyService.LEGACY_SUPERUSER_ROLE_DESCRIPTOR), + getApiKeyDocument(apiKeyId) + ); + + final ApiKey apiKeyInfo = getApiKeyInfo(client(), apiKeyId, true, 
randomBoolean()); + assertThat( + apiKeyInfo.getLimitedBy().roleDescriptorsList().iterator().next(), + equalTo(Set.of(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR)) + ); + } + public void testApiKeyWithManageOwnPrivilegeIsAbleToInvalidateItselfButNotAnyOtherKeysCreatedBySameOwner() throws InterruptedException, ExecutionException { List responses = createApiKeys(ES_TEST_ROOT_USER, 2, null, "manage_own_api_key").v1(); @@ -1285,6 +1535,18 @@ public void testDerivedKeys() throws ExecutionException, InterruptedException { assertNotNull(key100Response.getId()); assertNotNull(key100Response.getKey()); + // Derive keys have empty limited-by role descriptors + final PlainActionFuture future = new PlainActionFuture<>(); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyId(key100Response.getId()).withLimitedBy().build(), + future + ); + assertThat(future.actionGet().getApiKeyInfos().length, equalTo(1)); + final RoleDescriptorsIntersection limitedBy = future.actionGet().getApiKeyInfos()[0].getLimitedBy(); + assertThat(limitedBy.roleDescriptorsList().size(), equalTo(1)); + assertThat(limitedBy.roleDescriptorsList().iterator().next(), emptyIterable()); + // Check at the end to allow sometime for the operation to happen. Since an erroneous creation is // asynchronous so that the document is not available immediately. assertApiKeyNotCreated(client, "key-2"); @@ -1546,7 +1808,7 @@ public void testUpdateApiKeysForSingleKey() throws Exception { final var expectedMetadata = request.getMetadata() != null ? request.getMetadata() : createdApiKey.v2(); final var expectedRoleDescriptors = nullRoleDescriptors ? List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR) : newRoleDescriptors; - expectAttributesForApiKey( + doTestApiKeyHasExpectedAttributes( apiKeyId, Map.of( ApiKeyAttribute.CREATOR, @@ -1609,7 +1871,7 @@ public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, In ) ); for (String apiKeyId : apiKeyIds) { - expectAttributesForApiKey( + doTestApiKeyHasExpectedAttributes( apiKeyId, Map.of( ApiKeyAttribute.METADATA, @@ -1641,7 +1903,7 @@ public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, In assertThat(response.getNoops(), containsInAnyOrder(apiKeyIds.toArray())); assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(notFoundIds.toArray())); for (String apiKeyId : apiKeyIds) { - expectAttributesForApiKey( + doTestApiKeyHasExpectedAttributes( apiKeyId, Map.of( ApiKeyAttribute.METADATA, @@ -1817,7 +2079,10 @@ public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { "all" ).v1().get(0); final String apiKeyId = createdApiKey.getId(); - expectAttributesForApiKey(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorBeforeUpdate))); + doTestApiKeyHasExpectedAttributes( + apiKeyId, + Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorBeforeUpdate)) + ); final List newClusterPrivileges = randomValueOtherThan(clusterPrivileges, () -> { final List privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); @@ -1839,7 +2104,7 @@ public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { assertNotNull(response); assertTrue(response.isUpdated()); - expectAttributesForApiKey(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorAfterUpdate))); + doTestApiKeyHasExpectedAttributes(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorAfterUpdate))); // Update user role name only final RoleDescriptor 
roleDescriptorWithNewName = putRoleWithClusterPrivileges( @@ -1865,7 +2130,7 @@ public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception { expectedCreator.put("metadata", updatedUser.metadata()); expectedCreator.put("realm_type", "native"); expectedCreator.put("realm", "index"); - expectAttributesForApiKey( + doTestApiKeyHasExpectedAttributes( apiKeyId, Map.of(ApiKeyAttribute.CREATOR, expectedCreator, ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorWithNewName)) ); @@ -2181,7 +2446,7 @@ public void testUpdateApiKeysAutoUpdatesLegacySuperuserRoleDescriptor() throws E currentSuperuserRoleDescriptors ) ); - expectAttributesForApiKey(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, currentSuperuserRoleDescriptors)); + doTestApiKeyHasExpectedAttributes(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, currentSuperuserRoleDescriptors)); // Second update is noop because role descriptors were auto-updated by the previous request assertSingleNoop( apiKeyId, @@ -2299,35 +2564,40 @@ private enum ApiKeyAttribute { // Check attributes with both the raw document and the get api key response whenever possible @SuppressWarnings("unchecked") - private void expectAttributesForApiKey(String apiKeyId, Map attributes) throws IOException { + private void doTestApiKeyHasExpectedAttributes(String apiKeyId, Map attributes) throws IOException { final Map apiKeyDocMap = getApiKeyDocument(apiKeyId); - final PlainActionFuture future = new PlainActionFuture<>(); - client().execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyId(apiKeyId, false), future); - final GetApiKeyResponse getApiKeyResponse = future.actionGet(); - assertThat(getApiKeyResponse.getApiKeyInfos(), arrayWithSize(1)); - final ApiKey apiKeyInfo = getApiKeyResponse.getApiKeyInfos()[0]; - + final boolean useGetApiKey = randomBoolean(); + final ApiKey apiKeyInfo = getApiKeyInfo(client(), apiKeyId, true, useGetApiKey); for (Map.Entry entry : attributes.entrySet()) { switch (entry.getKey()) { case CREATOR -> { final var creatorMap = (Map) entry.getValue(); expectCreatorForApiKey(creatorMap, apiKeyDocMap); - assertThat(creatorMap.get("principal"), equalTo(apiKeyInfo.getUsername())); - assertThat(creatorMap.get("realm"), equalTo(apiKeyInfo.getRealm())); + assertThat("useGetApiKey: " + useGetApiKey, creatorMap.get("principal"), equalTo(apiKeyInfo.getUsername())); + assertThat("useGetApiKey: " + useGetApiKey, creatorMap.get("realm"), equalTo(apiKeyInfo.getRealm())); } case METADATA -> { final var metadata = (Map) entry.getValue(); expectMetadataForApiKey(metadata, apiKeyDocMap); - assertThat(metadata, equalTo(apiKeyInfo.getMetadata())); + assertThat("useGetApiKey: " + useGetApiKey, metadata, equalTo(apiKeyInfo.getMetadata())); } case ASSIGNED_ROLE_DESCRIPTORS -> { final var expectedRoleDescriptors = (Collection) entry.getValue(); expectRoleDescriptorsForApiKey("role_descriptors", expectedRoleDescriptors, apiKeyDocMap); - assertThat(expectedRoleDescriptors, containsInAnyOrder(apiKeyInfo.getRoleDescriptors().toArray(RoleDescriptor[]::new))); + assertThat( + "useGetApiKey: " + useGetApiKey, + expectedRoleDescriptors, + containsInAnyOrder(apiKeyInfo.getRoleDescriptors().toArray(RoleDescriptor[]::new)) + ); } case LIMITED_BY_ROLE_DESCRIPTORS -> { final var expectedRoleDescriptors = (Collection) entry.getValue(); expectRoleDescriptorsForApiKey("limited_by_role_descriptors", expectedRoleDescriptors, apiKeyDocMap); + assertThat( + "useGetApiKey: " + useGetApiKey, + expectedRoleDescriptors, + 
containsInAnyOrder(apiKeyInfo.getLimitedBy().roleDescriptorsList().iterator().next().toArray(RoleDescriptor[]::new)) + ); } default -> throw new IllegalStateException("unexpected attribute name"); } @@ -2336,7 +2606,7 @@ private void expectAttributesForApiKey(String apiKeyId, Map apiKeyIds, Map attributes) throws IOException { for (String apiKeyId : apiKeyIds) { - expectAttributesForApiKey(apiKeyId, attributes); + doTestApiKeyHasExpectedAttributes(apiKeyId, attributes); } } @@ -2387,6 +2657,30 @@ private Map getApiKeyDocument(String apiKeyId) { return client().execute(GetAction.INSTANCE, new GetRequest(SECURITY_MAIN_ALIAS, apiKeyId)).actionGet().getSource(); } + private ApiKey getApiKeyInfo(Client client, String apiKeyId, boolean withLimitedBy, boolean useGetApiKey) { + if (useGetApiKey) { + final PlainActionFuture future = new PlainActionFuture<>(); + client.execute( + GetApiKeyAction.INSTANCE, + GetApiKeyRequest.builder().apiKeyId(apiKeyId).withLimitedBy(withLimitedBy).build(), + future + ); + final GetApiKeyResponse getApiKeyResponse = future.actionGet(); + assertThat(getApiKeyResponse.getApiKeyInfos(), arrayWithSize(1)); + return getApiKeyResponse.getApiKeyInfos()[0]; + } else { + final PlainActionFuture future = new PlainActionFuture<>(); + client.execute( + QueryApiKeyAction.INSTANCE, + new QueryApiKeyRequest(QueryBuilders.idsQuery().addIds(apiKeyId), null, null, null, null, withLimitedBy), + future + ); + final QueryApiKeyResponse queryApiKeyResponse = future.actionGet(); + assertThat(queryApiKeyResponse.getItems(), arrayWithSize(1)); + return queryApiKeyResponse.getItems()[0].getApiKey(); + } + } + private ServiceWithNodeName getServiceWithNodeName() { final var nodeName = randomFrom(internalCluster().getNodeNames()); final var service = internalCluster().getInstance(ApiKeyService.class, nodeName); @@ -2439,65 +2733,70 @@ private void assertApiKeyNotCreated(Client client, String keyName) throws Execut ); } - private void verifyGetResponse( + private void verifyApiKeyInfos( int expectedNumberOfApiKeys, List responses, List> metadatas, List expectedRoleDescriptors, - GetApiKeyResponse response, + List expectedLimitedByRoleDescriptors, + ApiKey[] apiKeyInfos, Set validApiKeyIds, List invalidatedApiKeyIds ) { - verifyGetResponse( + verifyApiKeyInfos( ES_TEST_ROOT_USER, expectedNumberOfApiKeys, responses, metadatas, expectedRoleDescriptors, - response, + expectedLimitedByRoleDescriptors, + apiKeyInfos, validApiKeyIds, invalidatedApiKeyIds ); } - private void verifyGetResponse( + private void verifyApiKeyInfos( String user, int expectedNumberOfApiKeys, List responses, List> metadatas, List expectedRoleDescriptors, - GetApiKeyResponse response, + List expectedLimitedByRoleDescriptors, + ApiKey[] apiKeyInfos, Set validApiKeyIds, List invalidatedApiKeyIds ) { - verifyGetResponse( + verifyApiKeyInfos( new String[] { user }, expectedNumberOfApiKeys, responses, metadatas, expectedRoleDescriptors, - response, + (ignore) -> expectedLimitedByRoleDescriptors, + apiKeyInfos, validApiKeyIds, invalidatedApiKeyIds ); } - private void verifyGetResponse( + private void verifyApiKeyInfos( String[] user, int expectedNumberOfApiKeys, List responses, List> metadatas, List expectedRoleDescriptors, - GetApiKeyResponse response, + Function> expectedLimitedByRoleDescriptorsLookup, + ApiKey[] apiKeyInfos, Set validApiKeyIds, List invalidatedApiKeyIds ) { - assertThat(response.getApiKeyInfos().length, equalTo(expectedNumberOfApiKeys)); + assertThat(apiKeyInfos.length, equalTo(expectedNumberOfApiKeys)); List 
expectedIds = responses.stream() .filter(o -> validApiKeyIds.contains(o.getId())) .map(o -> o.getId()) .collect(Collectors.toList()); - List actualIds = Arrays.stream(response.getApiKeyInfos()) + List actualIds = Arrays.stream(apiKeyInfos) .filter(o -> o.isInvalidated() == false) .map(o -> o.getId()) .collect(Collectors.toList()); @@ -2506,19 +2805,19 @@ private void verifyGetResponse( .filter(o -> validApiKeyIds.contains(o.getId())) .map(o -> o.getName()) .collect(Collectors.toList()); - List actualNames = Arrays.stream(response.getApiKeyInfos()) + List actualNames = Arrays.stream(apiKeyInfos) .filter(o -> o.isInvalidated() == false) .map(o -> o.getName()) .collect(Collectors.toList()); assertThat(actualNames, containsInAnyOrder(expectedNames.toArray(Strings.EMPTY_ARRAY))); Set expectedUsernames = (validApiKeyIds.isEmpty()) ? Collections.emptySet() : Set.of(user); - Set actualUsernames = Arrays.stream(response.getApiKeyInfos()) + Set actualUsernames = Arrays.stream(apiKeyInfos) .filter(o -> o.isInvalidated() == false) .map(o -> o.getUsername()) .collect(Collectors.toSet()); assertThat(actualUsernames, containsInAnyOrder(expectedUsernames.toArray(Strings.EMPTY_ARRAY))); if (invalidatedApiKeyIds != null) { - List actualInvalidatedApiKeyIds = Arrays.stream(response.getApiKeyInfos()) + List actualInvalidatedApiKeyIds = Arrays.stream(apiKeyInfos) .filter(o -> o.isInvalidated()) .map(o -> o.getId()) .collect(Collectors.toList()); @@ -2531,18 +2830,25 @@ private void verifyGetResponse( (m, i) -> m.put(responses.get(i).getId(), metadatas.get(i)), HashMap::putAll ); - for (ApiKey apiKey : response.getApiKeyInfos()) { + for (ApiKey apiKey : apiKeyInfos) { final Map metadata = idToMetadata.get(apiKey.getId()); assertThat(apiKey.getMetadata(), equalTo(metadata == null ? 
Map.of() : metadata)); } } - Arrays.stream(response.getApiKeyInfos()) - .forEach( - apiKeyInfo -> assertThat( - apiKeyInfo.getRoleDescriptors(), - containsInAnyOrder(expectedRoleDescriptors.toArray(RoleDescriptor[]::new)) - ) + Arrays.stream(apiKeyInfos).forEach(apiKeyInfo -> { + assertThat(apiKeyInfo.getRoleDescriptors(), containsInAnyOrder(expectedRoleDescriptors.toArray(RoleDescriptor[]::new))); + final List expectedLimitedByRoleDescriptors = expectedLimitedByRoleDescriptorsLookup.apply( + apiKeyInfo.getUsername() ); + if (expectedLimitedByRoleDescriptors == null) { + assertThat(apiKeyInfo.getLimitedBy(), nullValue()); + } else { + assertThat( + apiKeyInfo.getLimitedBy().roleDescriptorsList().iterator().next(), + containsInAnyOrder(expectedLimitedByRoleDescriptors.toArray(RoleDescriptor[]::new)) + ); + } + }); } private Tuple> createApiKey(String user, TimeValue expiration) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGetApiKeyAction.java index f63b7256c8628..92d28ba601fb5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGetApiKeyAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.SecurityContext; @@ -34,7 +33,7 @@ public TransportGetApiKeyAction( ApiKeyService apiKeyService, SecurityContext context ) { - super(GetApiKeyAction.NAME, transportService, actionFilters, (Writeable.Reader) GetApiKeyRequest::new); + super(GetApiKeyAction.NAME, transportService, actionFilters, GetApiKeyRequest::new); this.apiKeyService = apiKeyService; this.securityContext = context; } @@ -58,7 +57,7 @@ protected void doExecute(Task task, GetApiKeyRequest request, ActionListener { if (apiKeys.isEmpty()) { @@ -1576,6 +1577,7 @@ private void maybeStartApiKeyRemover() { * @param username user name * @param apiKeyName API key name * @param apiKeyIds API key ids + * @param withLimitedBy whether to parse and return the limited by role descriptors * @param listener listener for {@link GetApiKeyResponse} */ public void getApiKeys( @@ -1583,6 +1585,7 @@ public void getApiKeys( String username, String apiKeyName, String[] apiKeyIds, + boolean withLimitedBy, ActionListener listener ) { ensureEnabled(); @@ -1593,7 +1596,7 @@ public void getApiKeys( apiKeyIds, false, false, - this::convertSearchHitToApiKeyInfo, + hit -> convertSearchHitToApiKeyInfo(hit, withLimitedBy), ActionListener.wrap(apiKeyInfos -> { if (apiKeyInfos.isEmpty()) { logger.debug( @@ -1611,7 +1614,7 @@ public void getApiKeys( ); } - public void queryApiKeys(SearchRequest searchRequest, ActionListener listener) { + public void queryApiKeys(SearchRequest searchRequest, boolean withLimitedBy, ActionListener listener) { ensureEnabled(); final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); @@ -1636,7 +1639,7 @@ public void queryApiKeys(SearchRequest searchRequest, ActionListener apiKeyItem = Arrays.stream(searchResponse.getHits().getHits()) - 
.map(this::convertSearchHitToQueryItem) + .map(hit -> convertSearchHitToQueryItem(hit, withLimitedBy)) .toList(); listener.onResponse(new QueryApiKeyResponse(total, apiKeyItem)); }, listener::onFailure) @@ -1645,11 +1648,15 @@ public void queryApiKeys(SearchRequest searchRequest, ActionListener metadata = apiKeyDoc.metadataFlattened != null @@ -1662,6 +1669,10 @@ private ApiKey convertSearchHitToApiKeyInfo(SearchHit hit) { RoleReference.ApiKeyRoleType.ASSIGNED ); + final List limitedByRoleDescriptors = withLimitedBy + ? parseRoleDescriptorsBytes(apiKeyId, apiKeyDoc.limitedByRoleDescriptorsBytes, RoleReference.ApiKeyRoleType.LIMITED_BY) + : null; + return new ApiKey( apiKeyDoc.name, apiKeyId, @@ -1671,7 +1682,8 @@ private ApiKey convertSearchHitToApiKeyInfo(SearchHit hit) { (String) apiKeyDoc.creator.get("principal"), (String) apiKeyDoc.creator.get("realm"), metadata, - roleDescriptors + roleDescriptors, + limitedByRoleDescriptors ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 29c57cd7caf0a..6881794b7e64c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -207,7 +207,8 @@ static boolean checkSameUserPermissions(String action, TransportRequest request, // if the authentication is an API key then the request must also contain same API key id String authenticatedApiKeyId = (String) authentication.getMetadata().get(AuthenticationField.API_KEY_ID_KEY); if (Strings.hasText(getApiKeyRequest.getApiKeyId())) { - return getApiKeyRequest.getApiKeyId().equals(authenticatedApiKeyId); + // An API key requires manage_api_key privilege or higher to view any limited-by role descriptors + return getApiKeyRequest.getApiKeyId().equals(authenticatedApiKeyId) && false == getApiKeyRequest.withLimitedBy(); } else { return false; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java index f12488a7c9f10..5e89def6dd11a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java @@ -46,8 +46,16 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien final String userName = request.param("username"); final String realmName = request.param("realm_name"); final boolean myApiKeysOnly = request.paramAsBoolean("owner", false); - final GetApiKeyRequest getApiKeyRequest = new GetApiKeyRequest(realmName, userName, apiKeyId, apiKeyName, myApiKeysOnly); - return channel -> client.execute(GetApiKeyAction.INSTANCE, getApiKeyRequest, new RestBuilderListener(channel) { + final boolean withLimitedBy = request.paramAsBoolean("with_limited_by", false); + final GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.builder() + .realmName(realmName) + .userName(userName) + .apiKeyId(apiKeyId) + .apiKeyName(apiKeyName) + .ownedByAuthenticatedUser(myApiKeysOnly) + .withLimitedBy(withLimitedBy) + .build(); + return channel -> client.execute(GetApiKeyAction.INSTANCE, getApiKeyRequest, new RestBuilderListener<>(channel) { @Override 
public RestResponse buildResponse(GetApiKeyResponse getApiKeyResponse, XContentBuilder builder) throws Exception { getApiKeyResponse.toXContent(builder, channel.request()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java index 7d1bd96d550c2..b59337a64ad2f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; @@ -37,15 +38,9 @@ public final class RestQueryApiKeyAction extends ApiKeyBaseRestHandler { @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "query_api_key_request", - a -> new QueryApiKeyRequest( - (QueryBuilder) a[0], - (Integer) a[1], - (Integer) a[2], - (List) a[3], - (SearchAfterBuilder) a[4] - ) + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "query_api_key_request_payload", + a -> new Payload((QueryBuilder) a[0], (Integer) a[1], (Integer) a[2], (List) a[3], (SearchAfterBuilder) a[4]) ); static { @@ -93,10 +88,30 @@ public String getName() { @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final QueryApiKeyRequest queryApiKeyRequest = request.hasContentOrSourceParam() - ? 
PARSER.parse(request.contentOrSourceParamParser(), null) - : new QueryApiKeyRequest(); + final boolean withLimitedBy = request.paramAsBoolean("with_limited_by", false); + final QueryApiKeyRequest queryApiKeyRequest; + if (request.hasContentOrSourceParam()) { + final Payload payload = PARSER.parse(request.contentOrSourceParamParser(), null); + queryApiKeyRequest = new QueryApiKeyRequest( + payload.queryBuilder, + payload.from, + payload.size, + payload.fieldSortBuilders, + payload.searchAfterBuilder, + withLimitedBy + ); + } else { + queryApiKeyRequest = new QueryApiKeyRequest(null, null, null, null, null, withLimitedBy); + } return channel -> client.execute(QueryApiKeyAction.INSTANCE, queryApiKeyRequest, new RestToXContentListener<>(channel)); } + + private record Payload( + @Nullable QueryBuilder queryBuilder, + @Nullable Integer from, + @Nullable Integer size, + @Nullable List fieldSortBuilders, + @Nullable SearchAfterBuilder searchAfterBuilder + ) {} } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 56b9d81057aed..6efa86a60e8b0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -242,7 +242,7 @@ public void testGetApiKeys() throws Exception { String apiKeyName = randomFrom(randomAlphaOfLengthBetween(3, 8), null); String[] apiKeyIds = generateRandomStringArray(4, 4, true, true); PlainActionFuture getApiKeyResponsePlainActionFuture = new PlainActionFuture<>(); - service.getApiKeys(realmNames, username, apiKeyName, apiKeyIds, getApiKeyResponsePlainActionFuture); + service.getApiKeys(realmNames, username, apiKeyName, apiKeyIds, randomBoolean(), getApiKeyResponsePlainActionFuture); final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("doc_type", "api_key")); if (realmNames != null && realmNames.length > 0) { if (realmNames.length == 1) { @@ -832,7 +832,14 @@ public void testApiKeyServiceDisabled() throws Exception { ElasticsearchException e = expectThrows( ElasticsearchException.class, - () -> service.getApiKeys(new String[] { randomAlphaOfLength(6) }, randomAlphaOfLength(8), null, null, new PlainActionFuture<>()) + () -> service.getApiKeys( + new String[] { randomAlphaOfLength(6) }, + randomAlphaOfLength(8), + null, + null, + randomBoolean(), + new PlainActionFuture<>() + ) ); assertThat(e, instanceOf(FeatureNotEnabledException.class)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index a5066e9eac2ba..90bd5499f1afb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -302,10 +302,18 @@ public void testSameUserPermissionAllowsSelfApiKeyInfoRetrievalWhenAuthenticated final User user = new User("joe"); final String apiKeyId = randomAlphaOfLengthBetween(4, 7); final Authentication authentication = AuthenticationTests.randomApiKeyAuthentication(user, apiKeyId); - final TransportRequest request = GetApiKeyRequest.usingApiKeyId(apiKeyId, false); + final TransportRequest request = 
GetApiKeyRequest.builder().apiKeyId(apiKeyId).build(); assertTrue(RBACEngine.checkSameUserPermissions(GetApiKeyAction.NAME, request, authentication)); } + public void testSameUserPermissionDeniesSelfApiKeyInfoRetrievalWithLimitedByWhenAuthenticatedByApiKey() { + final User user = new User("joe"); + final String apiKeyId = randomAlphaOfLengthBetween(4, 7); + final Authentication authentication = AuthenticationTests.randomApiKeyAuthentication(user, apiKeyId); + final TransportRequest request = GetApiKeyRequest.builder().apiKeyId(apiKeyId).withLimitedBy(true).build(); + assertFalse(RBACEngine.checkSameUserPermissions(GetApiKeyAction.NAME, request, authentication)); + } + public void testSameUserPermissionDeniesApiKeyInfoRetrievalWhenAuthenticatedByADifferentApiKey() { final User user = new User("joe"); final String apiKeyId = randomAlphaOfLengthBetween(4, 7); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index bc070cbe4e354..86c3e8a53bd49 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -75,6 +75,14 @@ public void testGetApiKey() throws Exception { final Map param4 = mapBuilder().put("id", "api-key-id-1").map(); final Map param5 = mapBuilder().put("name", "api-key-name-1").map(); final Map params = randomFrom(param1, param2, param3, param4, param5); + final boolean withLimitedBy = randomBoolean(); + if (withLimitedBy) { + params.put("with_limited_by", "true"); + } else { + if (randomBoolean()) { + params.put("with_limited_by", "false"); + } + } final boolean replyEmptyResponse = rarely(); final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(params).build(); @@ -90,9 +98,21 @@ public void sendResponse(RestResponse restResponse) { @SuppressWarnings("unchecked") final Map metadata = ApiKeyTests.randomMetadata(); final List roleDescriptors = randomUniquelyNamedRoleDescriptors(0, 3); + final List limitedByRoleDescriptors = withLimitedBy ? 
randomUniquelyNamedRoleDescriptors(1, 3) : null; final GetApiKeyResponse getApiKeyResponseExpected = new GetApiKeyResponse( Collections.singletonList( - new ApiKey("api-key-name-1", "api-key-id-1", creation, expiration, false, "user-x", "realm-1", metadata, roleDescriptors) + new ApiKey( + "api-key-name-1", + "api-key-id-1", + creation, + expiration, + false, + "user-x", + "realm-1", + metadata, + roleDescriptors, + limitedByRoleDescriptors + ) ) ); @@ -152,7 +172,8 @@ public void doE "user-x", "realm-1", metadata, - roleDescriptors + roleDescriptors, + limitedByRoleDescriptors ) ) ); @@ -169,6 +190,14 @@ public void testGetApiKeyOwnedByCurrentAuthenticatedUser() throws Exception { } else { param = mapBuilder().put("owner", Boolean.FALSE.toString()).put("realm_name", "realm-1").map(); } + final boolean withLimitedBy = randomBoolean(); + if (withLimitedBy) { + param.put("with_limited_by", "true"); + } else { + if (randomBoolean()) { + param.put("with_limited_by", "false"); + } + } final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build(); @@ -191,7 +220,8 @@ public void sendResponse(RestResponse restResponse) { "user-x", "realm-1", ApiKeyTests.randomMetadata(), - randomUniquelyNamedRoleDescriptors(0, 3) + randomUniquelyNamedRoleDescriptors(0, 3), + withLimitedBy ? randomUniquelyNamedRoleDescriptors(1, 3) : null ); final ApiKey apiKey2 = new ApiKey( "api-key-name-2", @@ -202,7 +232,8 @@ public void sendResponse(RestResponse restResponse) { "user-y", "realm-1", ApiKeyTests.randomMetadata(), - randomUniquelyNamedRoleDescriptors(0, 3) + randomUniquelyNamedRoleDescriptors(0, 3), + withLimitedBy ? randomUniquelyNamedRoleDescriptors(1, 3) : null ); final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsTrue = new GetApiKeyResponse(Collections.singletonList(apiKey1)); final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsFalse = new GetApiKeyResponse(List.of(apiKey1, apiKey2)); From 5a197290ed088cbffc96310662f9b7f730b1b8cc Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 12 Aug 2022 16:24:11 +1000 Subject: [PATCH 182/265] Drop username from AuthenticateRequest (#88365) Since transport client is no longer supported for 8.x, the username field in AuthenticateRequest is not useful at all. At REST layer, the API never requires passing the username. Authentication should always be performed for the current authenticating/effective subject. This logic does not need to depend on the username. This PR drops the username field and makes the Request class a singleton. 
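For illustration, the new call pattern looks roughly like this (a minimal sketch derived from the test changes in this patch; client construction and assertions are elided, and the 10-second timeout mirrors the updated tests):

    // Authenticate the current effective subject; no username is passed any more,
    // the request is the shared singleton instance.
    AuthenticateResponse response = client
        .execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE)
        .actionGet(10, TimeUnit.SECONDS);
    // The authenticated principal is read back from the response rather than echoed from the request.
    String principal = response.authentication().getUser().principal();
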
Relates: #88335 --- .../action/user/AuthenticateRequest.java | 33 +++++++------------ .../user/AuthenticateRequestBuilder.java | 26 --------------- .../authc/SecurityRealmSettingsTests.java | 5 ++- .../authc/esnative/NativeRealmIntegTests.java | 2 +- .../ServiceAccountSingleNodeTests.java | 10 ++---- .../enrollment/EnrollmentSingleNodeTests.java | 12 +++---- .../security/action/TransportGrantAction.java | 2 +- .../xpack/security/authz/RBACEngine.java | 5 ++- .../rest/action/RestAuthenticateAction.java | 4 +-- .../TransportGrantApiKeyActionTests.java | 2 -- .../TransportAuthenticateActionTests.java | 8 ++--- .../support/SecondaryAuthenticatorTests.java | 10 +++--- .../authz/AuthorizationServiceTests.java | 5 ++- .../xpack/security/authz/RBACEngineTests.java | 26 +++++++++++---- .../ldap/AbstractAdLdapRealmTestCase.java | 6 ++-- .../authc/ldap/ActiveDirectoryRunAsIT.java | 6 ++-- 16 files changed, 64 insertions(+), 98 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequestBuilder.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java index 488fb5b0fc286..baecb3e2b6bdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.security.action.user; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; @@ -13,20 +14,19 @@ import java.io.IOException; -public class AuthenticateRequest extends ActionRequest implements UserRequest { +public class AuthenticateRequest extends ActionRequest { - private String username; + public static final AuthenticateRequest INSTANCE = new AuthenticateRequest(); public AuthenticateRequest(StreamInput in) throws IOException { super(in); - username = in.readString(); + if (in.getVersion().before(Version.V_8_5_0)) { + // Older versions included the username as a field + in.readString(); + } } - public AuthenticateRequest() {} - - public AuthenticateRequest(String username) { - this.username = username; - } + private AuthenticateRequest() {} @Override public ActionRequestValidationException validate() { @@ -34,22 +34,11 @@ public ActionRequestValidationException validate() { return null; } - public String username() { - return username; - } - - public void username(String username) { - this.username = username; - } - - @Override - public String[] usernames() { - return new String[] { username }; - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(username); + if (out.getVersion().before(Version.V_8_5_0)) { + throw new IllegalStateException("cannot send authenticate request to a node of earlier version"); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequestBuilder.java deleted file mode 100644 index 36ad5cacf2c01..0000000000000 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequestBuilder.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.core.security.action.user; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class AuthenticateRequestBuilder extends ActionRequestBuilder { - - public AuthenticateRequestBuilder(ElasticsearchClient client) { - this(client, AuthenticateAction.INSTANCE); - } - - public AuthenticateRequestBuilder(ElasticsearchClient client, AuthenticateAction action) { - super(client, action, new AuthenticateRequest()); - } - - public AuthenticateRequestBuilder username(String username) { - request.username(username); - return this; - } -} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java index 23f115ce22d34..5dca9b2efdd1f 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java @@ -143,9 +143,8 @@ protected boolean transportSSLEnabled() { } public void testClusterStarted() { - final AuthenticateRequest request = new AuthenticateRequest(); - request.username(nodeClientUsername()); - final AuthenticateResponse authenticate = client().execute(AuthenticateAction.INSTANCE, request).actionGet(10, TimeUnit.SECONDS); + final AuthenticateResponse authenticate = client().execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE) + .actionGet(10, TimeUnit.SECONDS); assertThat(authenticate.authentication(), notNullValue()); assertThat(authenticate.authentication().getUser(), notNullValue()); assertThat(authenticate.authentication().getUser().enabled(), is(true)); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index a5acaffdc547c..328b70b2b8282 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -743,7 +743,7 @@ public void testOperationsOnReservedUsers() throws Exception { // authenticate should work AuthenticateResponse authenticateResponse = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword())) - ).execute(AuthenticateAction.INSTANCE, new AuthenticateRequest(username)).get(); + ).execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE).get(); assertThat(authenticateResponse.authentication().getUser().principal(), is(username)); assertThat(authenticateResponse.authentication().getAuthenticatedBy().getName(), equalTo("reserved")); 
assertThat(authenticateResponse.authentication().getAuthenticatedBy().getType(), equalTo("reserved")); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountSingleNodeTests.java index d74b11915fbe0..33b037c771767 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountSingleNodeTests.java @@ -86,12 +86,10 @@ protected String configServiceTokens() { } public void testAuthenticateWithServiceFileToken() { - final AuthenticateRequest authenticateRequest = new AuthenticateRequest("elastic/fleet-server"); final AuthenticateResponse authenticateResponse = createServiceAccountClient().execute( AuthenticateAction.INSTANCE, - authenticateRequest + AuthenticateRequest.INSTANCE ).actionGet(); - final String nodeName = node().settings().get(Node.NODE_NAME_SETTING.getKey()); assertThat(authenticateResponse.authentication(), equalTo(getExpectedAuthentication("token1", "file"))); } @@ -101,10 +99,9 @@ public void testApiServiceAccountToken() { final SecureString secretValue1 = createApiServiceToken("api-token-1"); assertThat(cache.count(), equalTo(0)); - final AuthenticateRequest authenticateRequest = new AuthenticateRequest("elastic/fleet-server"); final AuthenticateResponse authenticateResponse = createServiceAccountClient(secretValue1.toString()).execute( AuthenticateAction.INSTANCE, - authenticateRequest + AuthenticateRequest.INSTANCE ).actionGet(); assertThat(authenticateResponse.authentication(), equalTo(getExpectedAuthentication("api-token-1", "index"))); // cache is populated after authenticate @@ -203,10 +200,9 @@ private SecureString createApiServiceToken(String tokenName) { } private void authenticateWithApiToken(String tokenName, SecureString secret) { - final AuthenticateRequest authenticateRequest = new AuthenticateRequest("elastic/fleet-server"); final AuthenticateResponse authenticateResponse = createServiceAccountClient(secret.toString()).execute( AuthenticateAction.INSTANCE, - authenticateRequest + AuthenticateRequest.INSTANCE ).actionGet(); assertThat(authenticateResponse.authentication(), equalTo(getExpectedAuthentication(tokenName, "index"))); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/enrollment/EnrollmentSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/enrollment/EnrollmentSingleNodeTests.java index 6348a63ae14b2..b4df44b6877f6 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/enrollment/EnrollmentSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/enrollment/EnrollmentSingleNodeTests.java @@ -99,10 +99,8 @@ public void testKibanaEnrollmentTokenCreation() throws Exception { "ApiKey " + Base64.getEncoder().encodeToString(enrollmentTokenSetOnce.get().getApiKey().getBytes(StandardCharsets.UTF_8)) ) ); - final AuthenticateResponse authenticateResponse1 = apiKeyClient.execute( - AuthenticateAction.INSTANCE, - new AuthenticateRequest("_xpack_security") - ).actionGet(); + final AuthenticateResponse authenticateResponse1 = 
apiKeyClient.execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE) + .actionGet(); assertThat(authenticateResponse1.authentication().getUser().principal(), equalTo("_xpack_security")); final KibanaEnrollmentResponse kibanaEnrollmentResponse = apiKeyClient.execute( @@ -115,10 +113,8 @@ public void testKibanaEnrollmentTokenCreation() throws Exception { Map.of("Authorization", "Bearer " + kibanaEnrollmentResponse.getTokenValue()) ); - final AuthenticateResponse authenticateResponse2 = kibanaClient.execute( - AuthenticateAction.INSTANCE, - new AuthenticateRequest("elastic/kibana") - ).actionGet(); + final AuthenticateResponse authenticateResponse2 = kibanaClient.execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE) + .actionGet(); assertThat(authenticateResponse2.authentication().getUser().principal(), equalTo("elastic/kibana")); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java index 2ae1e7b89025c..5ec52b85627b2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java @@ -79,7 +79,7 @@ public final void doExecute(Task task, Request request, ActionListener authorizationService.authorize( authentication, AuthenticateAction.NAME, - new AuthenticateRequest(effectiveUsername), + AuthenticateRequest.INSTANCE, ActionListener.wrap( ignore2 -> doExecuteWithGrantAuthentication(task, request, authentication, listener), listener::onFailure diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 6881794b7e64c..839d3a5437c39 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -43,6 +43,7 @@ import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesResponse; @@ -185,7 +186,9 @@ public void authorizeClusterAction( static boolean checkSameUserPermissions(String action, TransportRequest request, Authentication authentication) { final boolean actionAllowed = SAME_USER_PRIVILEGE.test(action); if (actionAllowed) { - if (request instanceof UserRequest userRequest) { + if (request instanceof AuthenticateRequest) { + return true; + } else if (request instanceof UserRequest userRequest) { String[] usernames = userRequest.usernames(); if (usernames == null || usernames.length != 1 || usernames[0] == null) { assert false : "this role should only be used for actions to apply to a single user"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java index 389dfb146b1fa..1798c25c0644b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java @@ -54,11 +54,9 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c if (user == null) { return restChannel -> { throw new IllegalStateException("we should never have a null user and invoke this consumer"); }; } - final String username = user.principal(); - return channel -> client.execute( AuthenticateAction.INSTANCE, - new AuthenticateRequest(username), + AuthenticateRequest.INSTANCE, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(AuthenticateResponse authenticateResponse, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java index abb1584bbca31..5d68dea7a146e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java @@ -273,8 +273,6 @@ public void testGrantWithRunAs() { final Object[] args = invocation.getArguments(); assertThat(args[0], is(authentication)); assertThat(args[1], is(AuthenticateAction.NAME)); - final AuthenticateRequest authenticateRequest = (AuthenticateRequest) args[2]; - assertThat(authenticateRequest.username(), equalTo(runAsUsername)); @SuppressWarnings("unchecked") final ActionListener listener = (ActionListener) args[3]; listener.onResponse(null); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java index f00f948b30bc2..6a4541f52ae90 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java @@ -64,7 +64,7 @@ public void testInternalUser() { final AtomicReference throwableRef = new AtomicReference<>(); final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), new AuthenticateRequest(), new ActionListener() { + action.doExecute(mock(Task.class), AuthenticateRequest.INSTANCE, new ActionListener() { @Override public void onResponse(AuthenticateResponse authenticateResponse) { responseRef.set(authenticateResponse); @@ -101,7 +101,7 @@ public void testNullUser() { final AtomicReference throwableRef = new AtomicReference<>(); final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), new AuthenticateRequest(), new ActionListener() { + action.doExecute(mock(Task.class), AuthenticateRequest.INSTANCE, new ActionListener() { @Override public void onResponse(AuthenticateResponse authenticateResponse) { responseRef.set(authenticateResponse); @@ -128,7 +128,7 @@ public void testValidAuthentication() { final AtomicReference 
throwableRef = new AtomicReference<>(); final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), new AuthenticateRequest(), new ActionListener<>() { + action.doExecute(mock(Task.class), AuthenticateRequest.INSTANCE, new ActionListener<>() { @Override public void onResponse(AuthenticateResponse authenticateResponse) { responseRef.set(authenticateResponse); @@ -179,7 +179,7 @@ public void testShouldNotAddAnonymousRolesForApiKeyOrServiceAccount() { final AtomicReference throwableRef = new AtomicReference<>(); final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), new AuthenticateRequest(), new ActionListener<>() { + action.doExecute(mock(Task.class), AuthenticateRequest.INSTANCE, new ActionListener<>() { @Override public void onResponse(AuthenticateResponse authenticateResponse) { responseRef.set(authenticateResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java index e216aad001507..e011d55622b9e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java @@ -163,7 +163,7 @@ public void cleanupMocks() throws Exception { } public void testAuthenticateTransportRequestIsANoOpIfHeaderIsMissing() throws Exception { - final TransportRequest request = new AuthenticateRequest(); + final TransportRequest request = AuthenticateRequest.INSTANCE; final PlainActionFuture future = new PlainActionFuture<>(); authenticator.authenticate(AuthenticateAction.NAME, request, future); @@ -181,7 +181,7 @@ public void testAuthenticateRestRequestIsANoOpIfHeaderIsMissing() throws Excepti public void testAuthenticateTransportRequestFailsIfHeaderHasUnrecognizedCredentials() throws Exception { threadPool.getThreadContext().putHeader(SECONDARY_AUTH_HEADER_NAME, "Fake " + randomAlphaOfLengthBetween(5, 30)); - final TransportRequest request = new AuthenticateRequest(); + final TransportRequest request = AuthenticateRequest.INSTANCE; final PlainActionFuture future = new PlainActionFuture<>(); authenticator.authenticate(AuthenticateAction.NAME, request, future); @@ -211,7 +211,7 @@ public void testAuthenticateRestRequestFailsIfHeaderHasUnrecognizedCredentials() public void testAuthenticateTransportRequestSucceedsWithBasicAuthentication() throws Exception { assertAuthenticateWithBasicAuthentication(listener -> { - final TransportRequest request = new AuthenticateRequest(); + final TransportRequest request = AuthenticateRequest.INSTANCE; authenticator.authenticate(AuthenticateAction.NAME, request, listener); }); } @@ -255,7 +255,7 @@ private SecondaryAuthentication assertAuthenticateWithBasicAuthentication(Consum public void testAuthenticateTransportRequestFailsWithIncorrectPassword() throws Exception { assertAuthenticateWithIncorrectPassword(listener -> { - final TransportRequest request = new AuthenticateRequest(); + final TransportRequest request = AuthenticateRequest.INSTANCE; authenticator.authenticate(AuthenticateAction.NAME, request, listener); }); } @@ -319,7 +319,7 @@ public void testAuthenticateUsingBearerToken() throws Exception { SecurityMocks.mockGetRequest(client, SecuritySystemIndices.SECURITY_TOKENS_ALIAS, tokenDocId.get(), 
tokenSource.get()); - final TransportRequest request = new AuthenticateRequest(); + final TransportRequest request = AuthenticateRequest.INSTANCE; final PlainActionFuture future = new PlainActionFuture<>(); authenticator.authenticate(AuthenticateAction.NAME, request, future); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 33807c6d4d022..0e27fe43237b7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -1743,7 +1743,6 @@ public void testRunAsRequestWithNoRolesUser() { public void testRunAsRequestWithoutLookedUpBy() throws IOException { final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); - AuthenticateRequest request = new AuthenticateRequest("run as me"); roleMap.put("superuser", ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); final User authUser = new User("test user", "superuser"); Authentication authentication = AuthenticationTestHelper.builder() @@ -1755,7 +1754,7 @@ public void testRunAsRequestWithoutLookedUpBy() throws IOException { authentication.writeToContext(threadContext); assertNotEquals(authUser, authentication.getUser()); assertThrowsAuthorizationExceptionRunAsDenied( - () -> authorize(authentication, AuthenticateAction.NAME, request), + () -> authorize(authentication, AuthenticateAction.NAME, AuthenticateRequest.INSTANCE), AuthenticateAction.NAME, "test user", "run as me" @@ -1764,7 +1763,7 @@ public void testRunAsRequestWithoutLookedUpBy() throws IOException { eq(requestId), eq(authentication), eq(AuthenticateAction.NAME), - eq(request), + eq(AuthenticateRequest.INSTANCE), authzInfoRoles(new String[] { ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName() }) ); verifyNoMoreInteractions(auditTrail); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index 90bd5499f1afb..42550d5668a03 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -36,12 +36,14 @@ import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; -import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesResponse; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; import org.elasticsearch.xpack.core.security.action.user.PutUserAction; 
import org.elasticsearch.xpack.core.security.action.user.UserRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -132,8 +134,8 @@ public void testSameUserPermission() { final boolean changePasswordRequest = randomBoolean(); final TransportRequest request = changePasswordRequest ? new ChangePasswordRequestBuilder(mock(Client.class)).username(user.principal()).request() - : new AuthenticateRequestBuilder(mock(Client.class)).username(user.principal()).request(); - final String action = changePasswordRequest ? ChangePasswordAction.NAME : AuthenticateAction.NAME; + : new HasPrivilegesRequestBuilder(mock(Client.class)).username(user.principal()).request(); + final String action = changePasswordRequest ? ChangePasswordAction.NAME : HasPrivilegesAction.NAME; final Authentication.RealmRef authenticatedBy = new Authentication.RealmRef( randomAlphaOfLengthBetween(3, 8), changePasswordRequest ? randomFrom(ReservedRealm.TYPE, NativeRealmSettings.TYPE) : randomAlphaOfLengthBetween(4, 12), @@ -152,8 +154,8 @@ public void testSameUserPermissionDoesNotAllowNonMatchingUsername() { final String username = randomFrom("", "joe" + randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(3, 10)); final TransportRequest request = changePasswordRequest ? new ChangePasswordRequestBuilder(mock(Client.class)).username(username).request() - : new AuthenticateRequestBuilder(mock(Client.class)).username(username).request(); - final String action = changePasswordRequest ? ChangePasswordAction.NAME : AuthenticateAction.NAME; + : new HasPrivilegesRequestBuilder(mock(Client.class)).username(username).request(); + final String action = changePasswordRequest ? ChangePasswordAction.NAME : HasPrivilegesAction.NAME; final Authentication.RealmRef authenticatedBy = new Authentication.RealmRef( randomAlphaOfLengthBetween(3, 8), @@ -178,11 +180,21 @@ public void testSameUserPermissionDoesNotAllowNonMatchingUsername() { if (request instanceof ChangePasswordRequest) { ((ChangePasswordRequest) request).username("joe"); } else { - ((AuthenticateRequest) request).username("joe"); + ((HasPrivilegesRequest) request).username("joe"); } assertTrue(RBACEngine.checkSameUserPermissions(action, request, authentication)); } + public void testSameUserPermissionForAuthenticateRequest() { + assertTrue( + RBACEngine.checkSameUserPermissions( + AuthenticateAction.NAME, + AuthenticateRequest.INSTANCE, + AuthenticationTestHelper.builder().build() + ) + ); + } + public void testSameUserPermissionDoesNotAllowOtherActions() { final TransportRequest request = mock(TransportRequest.class); final String action = randomFrom( @@ -206,7 +218,7 @@ public void testSameUserPermissionRunAsChecksAuthenticatedBy() { final boolean changePasswordRequest = randomBoolean(); final TransportRequest request = changePasswordRequest ? new ChangePasswordRequestBuilder(mock(Client.class)).username(username).request() - : new AuthenticateRequestBuilder(mock(Client.class)).username(username).request(); + : new HasPrivilegesRequestBuilder(mock(Client.class)).username(username).request(); final String action = changePasswordRequest ? 
ChangePasswordAction.NAME : AuthenticateAction.NAME; final Authentication.RealmRef authenticatedBy = AuthenticationTestHelper.randomRealmRef(false); diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java index 872efa98cb902..cfdd55eee52d7 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java @@ -25,7 +25,8 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; -import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -296,8 +297,7 @@ protected static String userHeader(String username, String password) { private void authenticateUser(Client client, String username, int retryCount) { for (int i = 1; i <= retryCount; i++) { try { - final AuthenticateResponse response = new AuthenticateRequestBuilder(client).username(username) - .execute() + final AuthenticateResponse response = client.execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE) .actionGet(10, TimeUnit.SECONDS); assertThat(response.authentication().getUser().principal(), is(username)); return; diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRunAsIT.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRunAsIT.java index e16d0fca1d389..40f4854c6d5ba 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRunAsIT.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRunAsIT.java @@ -64,8 +64,10 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testRunAs() throws Exception { String avenger = realmConfig.loginWithCommonName ? 
"Natasha Romanoff" : "blackwidow"; - final AuthenticateRequest request = new AuthenticateRequest(avenger); - final ActionFuture future = runAsClient(avenger).execute(AuthenticateAction.INSTANCE, request); + final ActionFuture future = runAsClient(avenger).execute( + AuthenticateAction.INSTANCE, + AuthenticateRequest.INSTANCE + ); final AuthenticateResponse response = future.get(30, TimeUnit.SECONDS); assertThat(response.authentication().getUser().principal(), Matchers.equalTo(avenger)); } From da3e4e8a0b2de5c51b564585c8146c23f97dda98 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 12 Aug 2022 09:15:30 +0200 Subject: [PATCH 183/265] Keep test folders of failed build tools integration tests (#89296) This simplifies our debugging logic we have in place to keep the test build environment for failed tests, making debugging easier without dealing with magic flags --- .../fixtures/AbstractGradleFuncTest.groovy | 12 +++++- .../internal/test/TestResultExtension.java | 37 +++++++++++++++++++ ...amework.runtime.extension.IGlobalExtension | 9 +++++ 3 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestResultExtension.java create mode 100644 build-tools/src/testFixtures/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 7719c63f37710..87bff62d0184e 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -12,6 +12,7 @@ import org.apache.commons.io.FileUtils import org.elasticsearch.gradle.internal.test.ConfigurationCacheCompatibleAwareGradleRunner import org.elasticsearch.gradle.internal.test.InternalAwareGradleRunner import org.elasticsearch.gradle.internal.test.NormalizeOutputGradleRunner +import org.elasticsearch.gradle.internal.test.TestResultExtension import org.gradle.testkit.runner.BuildResult import org.gradle.testkit.runner.GradleRunner import org.junit.Rule @@ -47,9 +48,9 @@ abstract class AbstractGradleFuncTest extends Specification { } def cleanup() { -// if (Boolean.getBoolean('test.keep.samplebuild')) { + if (featureFailed()) { FileUtils.copyDirectory(testProjectDir.root, new File("build/test-debug/" + testProjectDir.root.name)) -// } + } } File subProject(String subProjectPath) { @@ -205,6 +206,13 @@ checkstyle = "com.puppycrawl.tools:checkstyle:10.3" } + boolean featureFailed() { + specificationContext.currentSpec.listeners + .findAll { it instanceof TestResultExtension.ErrorListener } + .any { + (it as TestResultExtension.ErrorListener).errorInfo != null } + } + static class ProjectConfigurer { private File projectDir diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestResultExtension.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestResultExtension.java new file mode 100644 index 0000000000000..c08f25843c721 --- /dev/null +++ b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestResultExtension.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test; + +import org.spockframework.runtime.AbstractRunListener; +import org.spockframework.runtime.extension.IGlobalExtension; +import org.spockframework.runtime.model.ErrorInfo; +import org.spockframework.runtime.model.IterationInfo; +import org.spockframework.runtime.model.SpecInfo; + +public class TestResultExtension implements IGlobalExtension { + + @Override + public void visitSpec(SpecInfo spec) { + spec.addListener(new ErrorListener()); + } + + public static class ErrorListener extends AbstractRunListener { + ErrorInfo errorInfo; + + @Override + public void beforeIteration(IterationInfo iteration) { + errorInfo = null; + } + + @Override + public void error(ErrorInfo error) { + errorInfo = error; + } + } +} diff --git a/build-tools/src/testFixtures/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension b/build-tools/src/testFixtures/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension new file mode 100644 index 0000000000000..7d1e63d129ca1 --- /dev/null +++ b/build-tools/src/testFixtures/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension @@ -0,0 +1,9 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +org.elasticsearch.gradle.internal.test.TestResultExtension \ No newline at end of file From 654f31dd3b805daf44ecc9c200903c92837ec9ee Mon Sep 17 00:00:00 2001 From: Carlos Crespo Date: Fri, 12 Aug 2022 09:44:00 +0200 Subject: [PATCH 184/265] Update libbeat config module fields type (#88990) * Update libbeat config module fields type * Increment STACK_MONITORING_REGISTRY_VERSION Co-authored-by: Elastic Machine --- .../src/main/resources/monitoring-beats-mb.json | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json b/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json index d2397a29a7982..a883feb674f83 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json @@ -735,13 +735,16 @@ "config": { "properties": { "running": { - "type": "short" + "type": "long" }, "starts": { - "type": "short" + "type": "long" }, "stops": { - "type": "short" + "type": "long" + }, + "reloads": { + "type": "long" } } }, @@ -1787,6 +1790,10 @@ "path": "beat.stats.libbeat.config.stops" } } + }, + "reloads": { + "type": "alias", + "path": "beat.stats.libbeat.config.reloads" } } }, From ed940b6ed5081f601e8ef8cbb991e8a577b9adc2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 12 Aug 2022 09:52:58 +0100 Subject: [PATCH 185/265] Clarify that TransportService#sendRequest never throws (#89298) It's not obvious from reading the code that `TransportService#sendRequest` and friends always catch exceptions and pass them to the response handler, which means some callers are wrapping calls to `sendRequest` in their own unnecessary try/catch blocks. 
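For illustration, a minimal sketch of the calling pattern this guarantee enables (the action, response, listener and node names below are hypothetical, not part of this change):

    // Before: callers defensively wrapped the call, duplicating error handling.
    try {
        transportService.sendRequest(node, SomeAction.NAME, request, TransportRequestOptions.EMPTY,
            new ActionListenerResponseHandler<>(listener, SomeResponse::new));
    } catch (Exception e) {
        listener.onFailure(e); // redundant: sendRequest already routes failures to the handler
    }

    // After: rely on sendRequest never throwing; any failure, including the node not being
    // connected, is delivered to the handler's handleException method instead.
    transportService.sendRequest(node, SomeAction.NAME, request, TransportRequestOptions.EMPTY,
        new ActionListenerResponseHandler<>(listener, SomeResponse::new));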
This commit makes it clear that all exceptions are handled and removes the unnecessary exception handling in callers. Closes #89274 --- .../CoordinationDiagnosticsService.java | 24 ++++----- .../PublicationTransportHandler.java | 30 +++++------ .../transport/TransportService.java | 50 ++++++++++++------- .../AbstractSimpleTransportTestCase.java | 6 +-- 4 files changed, 54 insertions(+), 56 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index b474cb67772a5..82b85329d9dbd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -991,20 +991,16 @@ private Scheduler.Cancellable sendTransportRequest logger.trace("Opened connection to {}, making transport request", masterEligibleNode); // If we don't get a response in 10 seconds that is a failure worth capturing on its own: final TimeValue transportTimeout = TimeValue.timeValueSeconds(10); - try { - transportService.sendRequest( - masterEligibleNode, - transportActionType.name(), - transportActionRequest, - TransportRequestOptions.timeout(transportTimeout), - new ActionListenerResponseHandler<>( - ActionListener.runBefore(fetchRemoteResultListener, () -> Releasables.close(releasable)), - transportActionType.getResponseReader() - ) - ); - } catch (Exception e) { - responseConsumer.accept(responseTransformationFunction.apply(null, e)); - } + transportService.sendRequest( + masterEligibleNode, + transportActionType.name(), + transportActionRequest, + TransportRequestOptions.timeout(transportTimeout), + new ActionListenerResponseHandler<>( + ActionListener.runBefore(fetchRemoteResultListener, () -> Releasables.close(releasable)), + transportActionType.getResponseReader() + ) + ); } }, e -> { logger.warn("Exception connecting to master masterEligibleNode", e); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index 048b2e6665102..67dddfa6c247b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -423,24 +423,18 @@ private void sendClusterState( listener.onFailure(new IllegalStateException("serialized cluster state released before transmission")); return; } - try { - transportService.sendChildRequest( - destination, - PUBLISH_STATE_ACTION_NAME, - new BytesTransportRequest(bytes, destination.getVersion()), - task, - STATE_REQUEST_OPTIONS, - new ActionListenerResponseHandler<>( - ActionListener.runAfter(listener, bytes::decRef), - PublishWithJoinResponse::new, - ThreadPool.Names.CLUSTER_COORDINATION - ) - ); - } catch (Exception e) { - assert false : e; - logger.warn(() -> format("error sending cluster state to %s", destination), e); - listener.onFailure(e); - } + transportService.sendChildRequest( + destination, + PUBLISH_STATE_ACTION_NAME, + new BytesTransportRequest(bytes, destination.getVersion()), + task, + STATE_REQUEST_OPTIONS, + new ActionListenerResponseHandler<>( + ActionListener.runAfter(listener, bytes::decRef), + PublishWithJoinResponse::new, + ThreadPool.Names.CLUSTER_COORDINATION + ) + ); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 3a30cbb433506..1b7fe6b615073 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -716,9 +716,15 @@ public final void sendRequest( final Transport.Connection connection; try { connection = getConnection(node); - } catch (final NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - handler.handleException(ex); + } catch (TransportException transportException) { + // should only be a NodeNotConnectedException in practice, but handle all cases anyway to be sure + assert transportException instanceof NodeNotConnectedException : transportException; + handleSendRequestException(handler, transportException); + return; + } catch (Exception exception) { + // shouldn't happen in practice, but handle it anyway to be sure + assert false : exception; + handleSendRequestException(handler, new SendRequestTransportException(node, action, exception)); return; } sendRequest(connection, action, request, options, handler); @@ -776,25 +782,25 @@ public final void sendRequest( delegate = handler; } asyncSender.sendRequest(connection, action, request, options, delegate); - } catch (final Exception ex) { - handleSendRequestException(connection, action, handler, ex); + } catch (TransportException transportException) { + handleSendRequestException(handler, transportException); + } catch (Exception exception) { + handleSendRequestException(handler, new SendRequestTransportException(connection.getNode(), action, exception)); } } - private void handleSendRequestException( - Transport.Connection connection, - String action, + private static void handleSendRequestException( TransportResponseHandler handler, - Exception ex + TransportException transportException ) { - // the caller might not handle this so we invoke the handler - final TransportException te; - if (ex instanceof TransportException tex) { - te = tex; - } else { - te = new SendRequestTransportException(connection.getNode(), action, ex); + try { + handler.handleException(transportException); + } catch (Exception innerException) { + // should not happen + innerException.addSuppressed(transportException); + logger.error("unexpected exception from handler.handleException", innerException); + assert false : innerException; } - handler.handleException(te); } /** @@ -820,9 +826,15 @@ public final void sendChildRequest( final Transport.Connection connection; try { connection = getConnection(node); - } catch (final NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - handler.handleException(ex); + } catch (TransportException transportException) { + // should only be a NodeNotConnectedException in practice, but handle all cases anyway to be sure + assert transportException instanceof NodeNotConnectedException : transportException; + handleSendRequestException(handler, transportException); + return; + } catch (Exception exception) { + // shouldn't happen in practice, but handle it anyway to be sure + assert false : exception; + handleSendRequestException(handler, new SendRequestTransportException(node, action, exception)); return; } sendChildRequest(connection, action, request, parentTask, options, handler); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index b823a2b3f8b7c..a0c826dacbbee 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -3176,11 +3176,7 @@ public static Future submitRequest( responseListener.whenComplete(handler::handleResponse, e -> handler.handleException((TransportException) e)); final PlainActionFuture future = PlainActionFuture.newFuture(); responseListener.addListener(future); - try { - transportService.sendRequest(node, action, request, options, futureHandler); - } catch (NodeNotConnectedException ex) { - futureHandler.handleException(ex); - } + transportService.sendRequest(node, action, request, options, futureHandler); return future; } } From f9055b5acfffe5a6354938d5ee8bb575baa80034 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 12 Aug 2022 10:06:23 +0100 Subject: [PATCH 186/265] Miscellaneous cleanups in TransportService (#89299) --- .../transport/TransportService.java | 54 ++++--------------- 1 file changed, 11 insertions(+), 43 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 1b7fe6b615073..e935bb1e1578e 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -90,14 +90,12 @@ public class TransportService extends AbstractLifecycleComponent // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they // do show up, we can print more descriptive information about them - final Map timeoutInfoHandlers = Collections.synchronizedMap( - new LinkedHashMap(100, .75F, true) { - @Override - protected boolean removeEldestEntry(Map.Entry eldest) { - return size() > 100; - } + final Map timeoutInfoHandlers = Collections.synchronizedMap(new LinkedHashMap<>(100, .75F, true) { + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + return size() > 100; } - ); + }); public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() { }; @@ -589,7 +587,6 @@ public static class HandshakeResponse extends TransportResponse { private final Version version; - @Nullable // if version < BUILD_HASH_HANDSHAKE_VERSION private final String buildHash; private final DiscoveryNode discoveryNode; @@ -685,8 +682,8 @@ public void addMessageListener(TransportMessageListener listener) { messageListener.listeners.add(listener); } - public boolean removeMessageListener(TransportMessageListener listener) { - return messageListener.listeners.remove(listener); + public void removeMessageListener(TransportMessageListener listener) { + messageListener.listeners.remove(listener); } public void addConnectionListener(TransportConnectionListener listener) { @@ -735,8 +732,8 @@ public final void sendRequest( */ public static Transport.Connection unwrapConnection(Transport.Connection connection) { Transport.Connection unwrapped = connection; - while (unwrapped instanceof RemoteConnectionManager.ProxyConnection) { - unwrapped = ((RemoteConnectionManager.ProxyConnection) unwrapped).getConnection(); + while (unwrapped instanceof RemoteConnectionManager.ProxyConnection proxyConnection) { + unwrapped = proxyConnection.getConnection(); } return unwrapped; } @@ -938,7 +935,7 @@ public void 
onFailure(Exception e) { } @Override - protected void doRun() throws Exception { + protected void doRun() { contextToNotify.handler().handleException(sendRequestException); } }); @@ -1334,36 +1331,7 @@ private void scheduleTimeout(TimeValue timeout) { } } - static class TimeoutInfoHolder { - - private final DiscoveryNode node; - private final String action; - private final long sentTime; - private final long timeoutTime; - - TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { - this.node = node; - this.action = action; - this.sentTime = sentTime; - this.timeoutTime = timeoutTime; - } - - public DiscoveryNode node() { - return node; - } - - public String action() { - return action; - } - - public long sentTime() { - return sentTime; - } - - public long timeoutTime() { - return timeoutTime; - } - } + record TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) {} /** * This handler wrapper ensures that the response thread executes with the correct thread context. Before any of the handle methods From e4c7febea11618c39ceea45716bacb196ff0fc7f Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Fri, 12 Aug 2022 11:08:05 +0200 Subject: [PATCH 187/265] Fix: Update geo-bounding-box-query.asciidoc (#87459) (#89301) There are some redundant words so I just removed those words. Please accept this change. (cherry picked from commit e1e539805154f8a6ed2b7d7cb9eaabda146182d7) Co-authored-by: Adnan Ashraf --- docs/reference/query-dsl/geo-bounding-box-query.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 3fb6c11181a19..5d132ee90bcda 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -10,7 +10,7 @@ intersect a bounding box. [discrete] [[geo-bounding-box-query-ex]] ==== Example -Assume the following the following documents are indexed: +Assume the following documents are indexed: [source,console] -------------------------------------------------- From 0502139885818e117c6d8bbcf91fc0ea3b5f79b1 Mon Sep 17 00:00:00 2001 From: Valeriy Khakhutskyy <1292899+valeriy42@users.noreply.github.com> Date: Fri, 12 Aug 2022 13:01:43 +0200 Subject: [PATCH 188/265] [ML] Feature importance test for house pricing data (#89307) This PR implements a test to capture missing feature importance values for the house pricing dataset described in #88536. Since the failure occurs very irregularly, we want to capture the random seed to be able to dig into a specific case. It doesn't affect the functionality; hence I mark this PR as a non-issue. 
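(For context on replaying such a failure: with the randomized test framework used by Elasticsearch, a reported seed can be pinned on a later run via the tests.seed system property, e.g. something like ./gradlew ':x-pack:plugin:ml:qa:native-multi-node-tests:javaRestTest' -Dtests.seed=<seed>; the exact Gradle task path given here is an assumption.)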
--- .../ClassificationHousePricingIT.java | 1666 +++++++++++++++++ 1 file changed, 1666 insertions(+) create mode 100644 x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationHousePricingIT.java diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationHousePricingIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationHousePricingIT.java new file mode 100644 index 0000000000000..9b9fbe5cc0b3b --- /dev/null +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationHousePricingIT.java @@ -0,0 +1,1666 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.BoostedTreeParams; +import org.elasticsearch.xpack.core.ml.dataframe.analyses.Classification; +import org.junit.After; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +/** + * This test targets the problem of missing feature importance values on the + * house pricing + * dataset described in https://github.com/elastic/elasticsearch/issues/88536. 
+ */ +public class ClassificationHousePricingIT extends MlNativeDataFrameAnalyticsIntegTestCase { + + static final List DATA = List.of( + "856,Y,1,548,2,1,CollgCr,2003", + "1262,Y,1,460,2,1,Veenker,1976", + "920,Y,1,608,2,1,CollgCr,2001", + "961,Y,1,642,3,1,Crawfor,1915", + "1145,Y,1,836,3,1,NoRidge,2000", + "796,Y,1,480,2,1,Mitchel,1993", + "1694,Y,1,636,2,1,Somerst,2004", + "1107,Y,1,484,2,1,NWAmes,1973", + "1022,Y,0,468,2,1,OldTown,1931", + "1077,Y,1,205,1,1,BrkSide,1939", + "1040,Y,1,384,1,1,Sawyer,1965", + "1182,Y,1,736,3,1,NridgHt,2005", + "912,Y,1,352,1,1,Sawyer,1962", + "1494,Y,1,840,3,1,CollgCr,2006", + "1253,Y,1,352,1,1,NAmes,1960", + "854,Y,0,576,2,1,BrkSide,1929", + "1004,Y,1,480,2,1,NAmes,1970", + "1296,Y,1,516,2,1,Sawyer,1967", + "1114,Y,1,576,2,1,SawyerW,2004", + "1339,Y,1,294,1,1,NAmes,1958", + "1158,Y,1,853,3,1,NridgHt,2005", + "1108,Y,0,280,1,1,IDOTRR,1930", + "1795,Y,1,534,2,1,CollgCr,2002", + "1060,Y,1,572,2,1,MeadowV,1976", + "1060,Y,1,270,1,1,Sawyer,1968", + "1600,Y,1,890,3,1,NridgHt,2007", + "900,Y,1,576,2,1,NAmes,1951", + "1704,Y,1,772,3,1,NridgHt,2007", + "1600,Y,1,319,1,1,NAmes,1957", + "520,N,1,240,1,1,BrkSide,1927", + "649,N,1,250,1,1,IDOTRR,1920", + "1228,Y,1,271,1,1,Sawyer,1966", + "1234,Y,1,484,2,1,CollgCr,2007", + "1700,Y,1,447,2,1,NAmes,1959", + "1561,Y,1,556,2,1,NridgHt,2005", + "1132,Y,1,691,3,1,NridgHt,2004", + "1097,Y,1,672,2,1,CollgCr,1994", + "1297,Y,1,498,2,1,NAmes,1954", + "1057,Y,1,246,1,1,NAmes,1953", + "1152,N,0,0,0,1,Edwards,1955", + "1324,Y,1,440,2,1,NAmes,1965", + "1328,Y,1,308,1,1,Timber,1959", + "884,Y,1,504,2,1,SawyerW,1983", + "938,Y,1,308,1,1,CollgCr,1975", + "1150,Y,0,300,1,1,NAmes,1959", + "1752,Y,1,576,2,1,NridgHt,2005", + "1518,Y,1,670,2,1,Mitchel,2003", + "1656,Y,1,826,3,1,Somerst,2006", + "736,Y,1,0,0,1,OldTown,1920", + "955,Y,1,386,1,1,Sawyer,1966", + "794,Y,1,388,2,1,Gilbert,1997", + "816,Y,1,528,2,1,BrkSide,1934", + "816,N,1,516,2,1,IDOTRR,1963", + "1842,Y,1,894,3,1,Veenker,1981", + "1360,Y,1,572,2,1,NAmes,1955", + "1425,Y,1,576,2,1,NAmes,1964", + "983,Y,1,480,2,1,Somerst,1999", + "860,Y,1,565,2,1,CollgCr,2004", + "1426,Y,1,641,3,1,StoneBr,2006", + "780,Y,1,352,1,1,CollgCr,1972", + "1158,Y,1,576,2,1,SawyerW,2004", + "581,N,1,288,1,1,IDOTRR,1920", + "1370,Y,1,484,2,1,NridgHt,2006", + "902,Y,1,480,2,1,OldTown,1921", + "1057,Y,1,645,2,1,CollgCr,1997", + "1143,Y,1,852,3,1,NridgHt,2004", + "2207,Y,1,576,2,1,NAmes,1970", + "1479,Y,1,558,2,1,CollgCr,2003", + "747,Y,1,220,1,1,OldTown,1945", + "1304,Y,1,667,2,1,ClearCr,1953", + "2223,Y,1,516,2,1,NAmes,1973", + "845,Y,1,360,2,1,Mitchel,1982", + "885,Y,1,427,2,1,Gilbert,1998", + "1086,Y,1,490,2,1,NAmes,1954", + "840,N,1,379,1,1,OldTown,1915", + "526,Y,1,297,1,1,MeadowV,1973", + "952,Y,0,283,1,1,NAmes,1956", + "1072,Y,1,240,1,1,BrkSide,1948", + "1768,N,1,0,0,1,Sawyer,1968", + "682,Y,1,440,2,1,OldTown,1910", + "1182,Y,1,509,1,1,NAmes,1968", + "1337,Y,1,405,2,1,Mitchel,1998", + "1563,Y,1,758,3,1,Somerst,2007", + "1065,Y,1,461,2,1,NAmes,1960", + "804,Y,1,400,2,1,Gilbert,1995", + "1301,Y,1,462,2,1,NoRidge,1991", + "684,Y,1,400,2,1,Gilbert,2005", + "612,Y,1,528,2,1,Somerst,2009", + "1013,N,1,0,0,1,IDOTRR,1915", + "990,Y,1,0,0,1,CollgCr,1994", + "1040,Y,0,420,2,1,NAmes,1950", + "1235,Y,1,480,2,1,NAmes,1961", + "964,Y,1,432,2,1,Crawfor,1921", + "1260,N,1,506,2,1,OldTown,1910", + "905,Y,1,684,2,1,CollgCr,1997", + "680,Y,1,420,2,1,Gilbert,1993", + "1588,Y,1,472,2,1,CollgCr,1999", + "960,Y,0,432,1,1,Edwards,1965", + "835,N,1,366,1,1,Edwards,1920", + "1225,Y,1,0,0,1,NAmes,1959", + 
"1610,Y,1,480,2,1,NWAmes,1977", + "977,Y,1,476,2,1,SawyerW,1985", + "1535,Y,1,410,2,1,SawyerW,1979", + "1226,Y,1,740,3,1,CollgCr,2009", + "1226,Y,0,240,1,1,IDOTRR,1931", + "1053,Y,1,648,2,1,Somerst,2003", + "1047,Y,1,273,1,1,OldTown,1885", + "789,Y,1,250,1,1,OldTown,1948", + "997,N,0,0,0,1,IDOTRR,1919", + "1844,Y,1,546,2,1,NWAmes,1977", + "1216,Y,1,325,1,1,Edwards,1954", + "774,Y,1,400,2,1,Gilbert,2000", + "1282,Y,1,792,3,1,CollgCr,2007", + "2259,Y,1,450,2,1,Crawfor,1953", + "1436,Y,1,180,1,1,Crawfor,1945", + "729,Y,1,440,2,1,Somerst,1999", + "1092,Y,1,288,1,1,Sawyer,1962", + "1125,Y,1,430,2,1,Edwards,2006", + "1699,Y,1,594,3,1,SawyerW,1990", + "728,Y,1,390,2,1,CollgCr,2005", + "988,Y,1,540,2,1,ClearCr,1969", + "772,Y,1,264,1,1,IDOTRR,1939", + "1080,Y,1,288,1,1,NAmes,1958", + "1199,Y,1,530,2,1,SawyerW,1993", + "1586,Y,1,435,2,1,NWAmes,1979", + "520,N,1,0,0,1,IDOTRR,1935", + "958,Y,1,440,2,1,NPkVill,1976", + "840,Y,1,0,0,1,OldTown,1930", + "660,Y,1,453,2,1,NAmes,1966", + "1053,Y,0,750,2,1,NAmes,1958", + "1216,Y,1,487,2,1,NAmes,1966", + "1022,Y,1,390,2,1,Gilbert,2000", + "1327,Y,1,624,2,1,NAmes,1959", + "1296,Y,1,471,2,1,Timber,2001", + "1721,Y,1,440,2,1,Sawyer,1968", + "1682,Y,1,530,2,1,NWAmes,1970", + "1214,Y,1,318,1,1,NAmes,1967", + "1959,Y,1,766,3,1,Mitchel,1988", + "1004,Y,1,660,3,1,CollgCr,1999", + "928,Y,1,470,2,1,CollgCr,1997", + "864,Y,1,0,0,1,NAmes,1971", + "1734,Y,1,660,2,1,CollgCr,2005", + "910,Y,1,720,2,1,NAmes,1952", + "1501,Y,1,577,2,1,CollgCr,1999", + "1728,Y,1,504,2,1,Sawyer,1963", + "970,Y,1,380,2,1,Edwards,2004", + "875,Y,0,180,1,1,BrkSide,1931", + "884,Y,1,434,2,1,Gilbert,2001", + "1080,Y,1,0,0,1,SawyerW,2004", + "896,Y,0,240,1,1,BrkSide,1936", + "969,Y,1,440,2,1,CollgCr,1975", + "1710,Y,1,866,3,1,NridgHt,2007", + "1097,Y,1,495,2,1,NWAmes,1971", + "1252,Y,1,564,2,1,ClearCr,1960", + "1200,Y,0,312,1,1,OldTown,1923", + "572,N,0,0,0,0,Edwards,1924", + "1040,Y,0,625,2,1,NAmes,1950", + "774,Y,1,680,3,1,Timber,2009", + "991,Y,1,678,2,1,Somerst,2004", + "1392,Y,1,576,2,1,Gilbert,2005", + "1232,Y,1,516,2,1,Veenker,1984", + "1572,Y,1,726,3,1,NridgHt,2003", + "1541,Y,1,532,2,1,NridgHt,2005", + "882,Y,1,0,0,1,OldTown,1956", + "1149,Y,0,216,1,1,OldTown,1926", + "808,Y,1,0,0,1,Edwards,1940", + "1867,Y,0,303,1,1,ClearCr,1955", + "1610,Y,1,789,3,1,NridgHt,2007", + "840,Y,1,440,2,1,Gilbert,2004", + "1707,Y,1,511,2,1,Timber,1981", + "854,Y,1,660,2,1,OldTown,1941", + "1656,Y,1,528,2,1,NAmes,1960", + "1064,Y,1,504,2,1,StoneBr,1987", + "1362,Y,1,504,2,1,NAmes,1961", + "1651,Y,1,616,2,1,Timber,1986", + "2158,Y,1,576,2,1,Edwards,1950", + "1164,Y,1,521,2,1,ClearCr,1988", + "1252,Y,1,451,2,1,Sawyer,1958", + "2234,Y,1,1166,3,1,StoneBr,2008", + "968,Y,1,480,2,1,OldTown,1923", + "769,Y,1,440,2,1,Somerst,2000", + "901,Y,1,216,1,1,Crawfor,1920", + "1340,Y,1,252,1,1,Edwards,1957", + "1362,Y,1,484,2,1,OldTown,2003", + "936,Y,1,576,2,1,BrkSide,1908", + "1518,Y,1,840,3,1,OldTown,1892", + "1217,Y,1,497,2,1,Mitchel,1990", + "808,Y,1,180,1,1,OldTown,1916", + "1224,Y,1,528,2,1,SawyerW,1979", + "1593,Y,1,682,2,1,StoneBr,2001", + "1549,Y,1,440,2,1,Crawfor,1932", + "725,Y,1,484,2,1,NAmes,1972", + "1431,Y,1,666,2,1,CollgCr,1999", + "970,Y,1,380,2,1,Edwards,2004", + "864,Y,1,352,1,1,CollgCr,1972", + "855,Y,1,440,2,1,NPkVill,1976", + "1726,Y,1,786,3,1,Somerst,2007", + "1360,Y,1,795,2,1,NAmes,1918", + "929,Y,1,0,0,1,OldTown,1912", + "1713,Y,1,856,3,1,NridgHt,2004", + "1121,Y,1,440,2,1,Edwards,2003", + "1279,Y,1,473,2,1,Mitchel,1977", + "865,Y,1,398,1,1,OldTown,1924", + "848,Y,1,420,2,1,CollgCr,2004", + 
"720,Y,1,240,1,1,OldTown,1947", + "1442,Y,1,500,2,1,Gilbert,1990", + "1696,Y,1,349,1,1,Sawyer,1962", + "1100,Y,1,312,1,1,NAmes,1960", + "1180,Y,1,454,2,1,SawyerW,1988", + "1092,Y,1,504,2,1,NAmes,1964", + "864,N,0,0,0,1,Edwards,1925", + "1212,Y,1,460,2,1,Edwards,2009", + "932,Y,1,644,2,1,Somerst,2009", + "990,Y,1,576,2,1,CollgCr,1995", + "689,Y,1,299,1,1,CollgCr,1977", + "1236,Y,1,447,1,1,NAmes,1957", + "1436,Y,1,484,2,1,CollgCr,2004", + "810,N,1,210,1,1,OldTown,1925", + "1137,Y,1,431,2,1,Crawfor,1939", + "1248,Y,1,438,2,1,Blmngtn,2005", + "1498,Y,1,675,2,1,CollgCr,2006", + "1010,Y,1,390,2,1,Gilbert,2002", + "811,Y,1,434,2,1,NWAmes,1975", + "864,Y,1,576,2,1,NAmes,1971", + "2392,Y,1,968,3,1,NridgHt,2003", + "630,Y,1,280,1,1,BrDale,1971", + "1214,Y,1,721,3,1,NoRidge,1995", + "483,Y,1,280,1,1,BrDale,1970", + "912,Y,1,336,1,1,Sawyer,1967", + "1555,Y,1,430,2,1,Blmngtn,2005", + "1194,Y,1,312,1,1,NAmes,1959", + "1490,Y,1,810,3,1,NoRidge,1995", + "483,Y,1,288,1,1,BrDale,1972", + "894,Y,1,308,1,1,CollgCr,1976", + "860,Y,1,440,2,1,Gilbert,2002", + "483,Y,1,264,1,1,BrDale,1971", + "1414,Y,1,494,2,1,CollgCr,2004", + "1014,Y,1,457,2,1,SawyerW,1993", + "1694,Y,1,818,3,1,NridgHt,2007", + "798,Y,0,220,1,1,Edwards,1945", + "1566,Y,1,750,2,1,Somerst,2008", + "866,Y,1,0,0,1,OldTown,1945", + "889,N,0,352,1,1,OldTown,1900", + "626,Y,1,288,1,1,SawyerW,1980", + "1222,Y,1,463,2,1,SawyerW,1994", + "1872,Y,1,604,2,1,NWAmes,1988", + "908,N,0,440,1,1,OldTown,1910", + "1375,Y,1,451,2,1,NAmes,1954", + "840,Y,1,500,2,1,CollgCr,2003", + "1444,Y,1,389,2,1,ClearCr,1958", + "1306,Y,1,0,0,1,BrkSide,1940", + "1625,Y,1,538,2,1,Crawfor,2006", + "798,Y,1,520,2,1,SawyerW,2004", + "1302,Y,1,309,1,1,NAmes,1964", + "1314,Y,1,294,1,1,NAmes,1957", + "1005,Y,1,429,2,1,Gilbert,1999", + "864,Y,1,673,2,1,Somerst,2003", + "1604,Y,1,660,2,1,CollgCr,2006", + "963,Y,1,564,2,1,CollgCr,2001", + "882,Y,0,308,1,1,OldTown,1956", + "1382,Y,1,884,2,1,NAmes,1962", + "1482,Y,1,868,3,1,CollgCr,2007", + "1212,Y,1,492,2,1,Sawyer,1977", + "926,Y,1,484,2,1,OldTown,1929", + "764,N,0,504,2,1,OldTown,1925", + "1422,Y,1,576,2,1,NWAmes,1981", + "802,Y,1,413,2,1,Gilbert,1997", + "1052,Y,1,240,1,1,SWISU,1939", + "778,Y,1,924,1,1,IDOTRR,1940", + "1113,Y,1,504,1,1,Edwards,1976", + "1095,Y,1,1053,3,1,Somerst,2006", + "1363,Y,1,439,2,1,ClearCr,1954", + "1164,Y,1,671,3,1,NoRidge,1999", + "1632,Y,1,338,1,1,NAmes,1958", + "816,Y,1,264,1,1,Mitchel,1982", + "952,N,1,672,2,1,BrkSide,1925", + "1560,Y,1,573,2,1,Mitchel,2003", + "864,Y,1,400,2,1,Gilbert,1951", + "2121,Y,1,732,3,1,NridgHt,2006", + "1156,Y,1,505,2,1,ClearCr,1977", + "1175,Y,1,575,2,1,SawyerW,1989", + "1262,Y,1,572,2,1,Somerst,2006", + "1314,Y,1,626,2,1,NridgHt,2007", + "1468,Y,1,898,3,1,Somerst,2008", + "1575,Y,1,529,2,1,SawyerW,1992", + "625,Y,1,528,2,1,Somerst,2006", + "1085,Y,1,440,2,1,NAmes,1962", + "858,Y,1,0,0,1,NAmes,1971", + "900,Y,1,280,1,1,Sawyer,1967", + "698,Y,0,384,1,1,BrkSide,1915", + "1079,Y,1,685,2,1,CollgCr,2006", + "936,N,1,0,0,1,SWISU,1912", + "1148,Y,1,281,1,1,Edwards,1949", + "1468,Y,1,539,2,1,NWAmes,1977", + "1644,Y,1,418,2,1,NAmes,1953", + "1003,Y,1,588,2,1,Mitchel,1984", + "910,Y,0,282,1,1,IDOTRR,1950", + "975,Y,1,576,2,1,Somerst,1997", + "1041,Y,1,539,2,1,NWAmes,1968", + "1152,Y,1,300,1,1,Crawfor,1950", + "1336,Y,1,375,1,1,Crawfor,1953", + "1210,Y,1,683,2,1,CollgCr,1998", + "1541,Y,1,843,3,1,CollgCr,2001", + "894,Y,1,552,2,1,CollgCr,1972", + "1675,Y,1,870,3,1,OldTown,1880", + "2000,Y,1,888,3,1,CollgCr,2004", + "1122,Y,1,746,3,1,SawyerW,1990", + "1035,Y,0,0,0,1,IDOTRR,1920", + 
"861,Y,1,539,2,1,Edwards,1940", + "1944,Y,1,708,3,1,NridgHt,2003", + "697,Y,1,420,2,1,Gilbert,1993", + "972,Y,1,240,1,1,NAmes,1948", + "793,Y,1,410,2,1,OldTown,1939", + "2036,Y,1,513,2,1,Timber,1965", + "832,Y,1,546,2,1,OldTown,1925", + "716,Y,1,432,2,1,Gilbert,2004", + "1153,Y,1,484,2,1,NWAmes,1980", + "1088,Y,1,1025,3,1,Somerst,2006", + "1372,Y,1,656,3,1,NoRidge,1993", + "1472,Y,1,588,2,1,NWAmes,1980", + "1249,Y,1,840,3,1,NridgHt,2006", + "1136,Y,1,872,3,1,NridgHt,2004", + "1553,Y,1,576,2,1,SawyerW,1986", + "1163,Y,1,220,1,1,OldTown,1955", + "1898,Y,1,564,2,1,NAmes,1967", + "803,N,0,360,2,1,IDOTRR,1941", + "1719,Y,1,473,2,1,Veenker,1993", + "1383,Y,1,292,1,1,NAmes,1960", + "1445,N,0,441,2,1,BrkSide,1916", + "596,N,1,189,1,1,IDOTRR,1920", + "1728,Y,1,352,1,1,NAmes,1964", + "1056,Y,1,308,1,1,NAmes,1958", + "1629,Y,1,880,3,1,NridgHt,2003", + "1358,Y,1,484,2,1,NridgHt,2004", + "943,Y,1,472,2,1,Gilbert,1998", + "1619,Y,1,529,2,1,Timber,1965", + "1922,Y,1,676,3,1,StoneBr,2005", + "1536,Y,1,532,2,1,CollgCr,2002", + "1621,Y,1,440,2,1,NWAmes,1984", + "1215,Y,0,297,1,1,NAmes,1958", + "993,Y,1,431,2,1,Timber,2002", + "841,Y,1,294,1,1,SawyerW,1950", + "1040,N,0,400,2,0,NAmes,1949", + "1684,Y,1,564,2,1,NridgHt,2005", + "536,Y,1,336,1,1,MeadowV,1976", + "972,Y,1,312,1,1,BrkSide,1939", + "958,Y,1,301,1,1,NAmes,1960", + "1478,Y,0,498,2,1,NAmes,1960", + "764,Y,1,474,2,1,NridgHt,2003", + "1848,Y,1,706,3,1,NridgHt,2005", + "1869,Y,1,617,2,1,NridgHt,2007", + "1453,Y,1,445,2,1,ClearCr,1986", + "616,N,1,200,1,1,Edwards,1941", + "720,Y,1,484,2,1,OldTown,1928", + "1192,Y,1,240,1,1,SWISU,1940", + "1200,Y,1,521,2,1,CollgCr,1995", + "1167,Y,1,400,2,1,Gilbert,1992", + "1142,Y,1,528,2,1,MeadowV,1976", + "1352,Y,0,288,1,1,ClearCr,1958", + "1086,Y,1,592,2,1,NoRidge,1998", + "912,Y,1,470,2,1,Mitchel,1978", + "988,Y,1,240,1,1,BrkSide,1940", + "495,Y,1,672,2,1,Edwards,2003", + "483,Y,1,264,1,1,BrDale,1972", + "790,Y,1,566,2,1,NWAmes,1976", + "672,Y,0,468,1,1,IDOTRR,1920", + "1394,Y,1,514,2,1,NAmes,1963", + "1431,Y,1,296,1,1,NAmes,1962", + "1268,Y,1,244,1,1,NAmes,1954", + "1287,Y,1,576,2,1,NAmes,1959", + "953,Y,1,460,2,1,Gilbert,2000", + "1120,Y,1,680,2,1,ClearCr,1959", + "752,Y,1,264,1,1,SawyerW,1984", + "1319,Y,1,270,1,1,NAmes,1953", + "847,Y,1,434,2,1,CollgCr,2003", + "904,N,0,0,0,1,Edwards,1922", + "914,Y,1,576,2,1,CollgCr,1996", + "1580,Y,1,610,2,1,Somerst,2004", + "1856,Y,1,834,3,1,StoneBr,2010", + "1007,Y,1,463,2,1,Gilbert,2000", + "1026,Y,1,308,1,1,SWISU,1924", + "1301,Y,1,572,2,1,Somerst,2006", + "939,Y,1,639,2,1,CollgCr,2006", + "784,N,0,360,2,1,SawyerW,1928", + "1079,Y,1,501,2,1,ClearCr,1992", + "1269,Y,1,430,2,1,Blmngtn,2004", + "658,Y,1,0,0,1,Edwards,1910", + "1125,Y,1,352,1,1,Edwards,1976", + "1479,Y,1,577,2,1,CollgCr,1999", + "1742,Y,1,846,3,1,NridgHt,2007", + "961,Y,1,384,1,1,Edwards,1900", + "804,Y,1,560,2,1,Mitchel,2001", + "882,Y,1,294,1,1,NAmes,1959", + "788,Y,0,0,0,1,BrkSide,1941", + "735,Y,0,240,1,1,OldTown,1940", + "1144,Y,1,596,1,1,Edwards,1956", + "894,Y,1,600,2,1,CollgCr,1972", + "876,Y,1,264,1,1,NAmes,1962", + "1077,Y,0,338,1,1,IDOTRR,1920", + "1112,Y,1,438,2,1,Somerst,2006", + "1288,Y,1,500,2,1,Veenker,1996", + "1310,Y,1,400,2,1,CollgCr,2005", + "672,Y,1,240,1,1,Sawyer,1940", + "1165,Y,1,420,2,1,NoRidge,1998", + "806,Y,1,373,2,1,Gilbert,1995", + "1620,Y,1,490,2,1,Sawyer,1976", + "1166,Y,0,240,1,1,SWISU,1936", + "840,Y,1,308,1,1,Crawfor,1915", + "1071,Y,1,947,3,1,NridgHt,2006", + "1050,Y,1,836,3,1,Somerst,2007", + "1276,Y,0,350,1,1,Edwards,1958", + "1056,N,1,572,2,1,Gilbert,1955", + 
"1478,Y,1,484,2,1,Somerst,2009", + "1028,Y,0,360,2,1,OldTown,1927", + "1080,Y,1,678,3,1,SawyerW,1993", + "1340,Y,1,396,2,1,Gilbert,2007", + "672,Y,1,440,2,1,Sawyer,1978", + "1370,N,0,864,3,1,Crawfor,1918", + "756,N,0,240,1,1,Edwards,1940", + "1056,Y,1,304,1,1,NAmes,1968", + "1344,Y,1,784,4,1,Mitchel,1997", + "1602,Y,1,529,2,1,NWAmes,1977", + "988,Y,0,520,2,1,Mitchel,1954", + "1470,Y,1,696,3,1,NoRidge,1998", + "1196,Y,0,297,1,1,NAmes,1956", + "707,Y,1,240,1,1,OldTown,1946", + "1644,Y,1,569,2,1,SawyerW,1989", + "907,Y,1,352,1,1,NAmes,1957", + "1208,Y,1,628,2,1,CollgCr,2007", + "1412,Y,1,576,2,1,Timber,1988", + "483,Y,1,264,1,1,BrDale,1971", + "1088,Y,1,0,0,1,OldTown,1920", + "765,Y,1,440,2,1,BrDale,1971", + "926,Y,1,470,2,1,Gilbert,1997", + "630,Y,1,0,0,1,MeadowV,1972", + "827,Y,1,550,2,1,CollgCr,1996", + "734,Y,0,440,2,1,OldTown,1920", + "904,Y,1,180,1,1,BrkSide,1926", + "694,N,1,352,1,1,Crawfor,1913", + "684,Y,1,528,2,1,Edwards,1920", + "2402,Y,1,672,3,1,NridgHt,2008", + "1440,N,0,0,0,1,Edwards,1955", + "1128,Y,1,360,2,1,BrkSide,1930", + "1258,Y,1,648,3,1,Blmngtn,2006", + "933,Y,1,493,2,1,CollgCr,1994", + "1689,Y,1,480,2,1,Edwards,1956", + "1888,Y,1,578,2,1,NAmes,1966", + "956,Y,1,431,2,1,Gilbert,1998", + "780,Y,1,198,1,1,IDOTRR,1937", + "679,Y,1,308,1,1,OldTown,1948", + "813,Y,0,270,1,1,OldTown,1930", + "1533,Y,1,576,2,1,ClearCr,1975", + "888,Y,1,422,2,1,Timber,1996", + "786,Y,1,676,2,1,Somerst,2008", + "1728,Y,1,560,2,1,Mitchel,1976", + "1242,Y,1,528,2,1,NWAmes,1973", + "624,N,1,513,3,1,OldTown,1916", + "1663,Y,1,529,2,1,ClearCr,1954", + "833,Y,1,228,1,1,OldTown,1925", + "979,Y,1,352,1,1,BrkSide,1950", + "832,Y,1,552,2,1,Somerst,2009", + "575,Y,1,576,2,1,SWISU,1936", + "864,Y,1,360,1,1,Sawyer,1965", + "849,Y,0,240,1,1,Crawfor,1934", + "1040,Y,1,0,0,1,CollgCr,1978", + "1414,Y,1,398,2,1,Blmngtn,2004", + "1277,Y,1,526,2,1,NAmes,1970", + "888,Y,1,312,1,1,NAmes,1942", + "1634,Y,1,866,3,1,NridgHt,2006", + "832,Y,1,506,2,1,SawyerW,1993", + "1502,Y,1,528,2,1,StoneBr,1985", + "1161,Y,1,534,2,1,NWAmes,1977", + "1072,Y,1,525,2,1,Edwards,2005", + "1976,Y,1,908,3,1,NridgHt,2006", + "1652,Y,1,499,2,1,StoneBr,2000", + "970,Y,1,624,2,1,Sawyer,1963", + "1493,Y,1,508,2,1,CollgCr,1997", + "2069,Y,1,694,3,1,NridgHt,2006", + "1718,Y,1,826,3,1,CollgCr,2007", + "1131,Y,1,672,2,1,OldTown,1937", + "1850,Y,1,772,3,1,NridgHt,2004", + "1792,Y,1,874,3,1,NridgHt,2003", + "916,Y,1,164,1,1,OldTown,1915", + "1216,Y,1,402,2,1,Mitchel,1998", + "999,Y,1,264,1,1,Sawyer,1962", + "1113,Y,1,264,1,1,NAmes,1950", + "1073,Y,1,515,2,1,NAmes,1965", + "1484,Y,1,487,2,1,NWAmes,1971", + "1766,N,0,520,2,1,OldTown,1900", + "630,Y,1,286,1,1,MeadowV,1970", + "616,Y,1,336,1,1,MeadowV,1976", + "958,Y,0,240,1,1,NAmes,1941", + "728,Y,1,429,2,1,Gilbert,2006", + "1269,Y,0,308,1,1,NAmes,1960", + "886,Y,0,273,1,1,OldTown,1938", + "720,N,0,0,0,1,IDOTRR,1920", + "3228,Y,1,546,2,1,NoRidge,1992", + "1133,Y,1,240,1,1,BrkSide,1925", + "899,Y,1,288,1,1,Sawyer,1967", + "912,Y,1,297,1,1,NAmes,1958", + "672,Y,1,264,1,1,BrDale,1973", + "866,Y,1,603,2,1,Somerst,2005", + "1214,Y,1,461,2,1,Edwards,1965", + "1801,Y,1,484,2,1,Crawfor,1959", + "855,Y,1,440,2,1,NPkVill,1974", + "960,Y,1,400,2,1,OldTown,1952", + "1065,Y,1,471,2,1,SawyerW,1993", + "1218,Y,1,676,2,1,Somerst,2009", + "689,Y,1,360,2,1,OldTown,1928", + "1041,Y,1,270,1,1,NAmes,1959", + "1363,Y,1,288,1,1,NAmes,1951", + "1368,Y,1,474,2,1,NridgHt,2005", + "864,Y,1,624,2,1,NAmes,1958", + "1080,Y,1,484,2,1,Mitchel,1983", + "789,N,1,200,1,0,Crawfor,1926", + "2020,Y,1,900,3,1,NridgHt,2009", + 
"1378,Y,1,583,2,1,NWAmes,1972", + "1277,Y,1,889,3,1,NoRidge,1996", + "882,Y,1,546,2,1,CollgCr,1998", + "1276,Y,1,282,1,1,Crawfor,1926", + "694,N,0,0,0,1,OldTown,1900", + "1244,Y,0,336,1,1,NAmes,1957", + "1004,Y,1,420,2,1,BrkSide,1947", + "3138,Y,1,884,3,1,Edwards,2007", + "1383,Y,1,834,3,1,NoRidge,1996", + "1266,Y,1,453,2,1,Somerst,2005", + "928,Y,1,252,1,1,NAmes,1956", + "1476,Y,1,858,3,1,NridgHt,2008", + "605,Y,1,0,0,1,Edwards,1920", + "2515,Y,1,484,2,1,Crawfor,1957", + "1509,Y,1,600,2,1,Timber,1988", + "751,Y,1,502,2,1,BrkSide,1920", + "827,Y,1,392,1,1,NAmes,1955", + "334,N,0,0,0,1,BrkSide,1946", + "707,Y,1,403,2,1,Gilbert,2004", + "820,Y,1,0,0,1,Edwards,1910", + "880,Y,1,527,2,1,CollgCr,1998", + "864,Y,1,576,2,1,NAmes,1972", + "1159,Y,1,336,1,1,Sawyer,1968", + "1601,Y,1,670,2,1,CollgCr,2001", + "1838,Y,1,765,3,1,Timber,2006", + "997,Y,1,648,3,1,NoRidge,2000", + "1680,Y,1,583,2,1,NWAmes,1998", + "767,Y,1,367,1,1,NAmes,1998", + "664,Y,1,426,2,1,Gilbert,2006", + "1377,Y,1,786,3,1,NWAmes,1988", + "915,Y,0,440,2,1,BrkSide,1923", + "768,Y,1,624,2,1,Mitchel,1970", + "825,Y,1,720,2,1,OldTown,1955", + "912,Y,1,615,2,1,Somerst,2003", + "1069,Y,1,440,2,1,NPkVill,1977", + "928,Y,0,288,1,1,OldTown,1957", + "1717,Y,1,908,3,1,NridgHt,2006", + "1126,Y,1,520,2,1,Edwards,1949", + "1006,Y,1,871,3,1,NridgHt,2003", + "1048,Y,0,280,1,1,BrkSide,1922", + "1092,Y,0,299,1,1,NAmes,1957", + "897,Y,1,570,1,1,IDOTRR,1920", + "729,Y,1,406,2,1,Gilbert,1996", + "1557,Y,1,420,2,1,Blmngtn,2003", + "1392,Y,1,528,2,1,Sawyer,1957", + "1389,Y,1,418,2,1,Mitchel,1974", + "996,Y,1,0,0,1,Edwards,1940", + "1163,Y,1,396,2,1,Edwards,1918", + "1166,Y,1,590,2,1,NoRidge,1992", + "841,N,0,216,1,1,SWISU,1915", + "1134,Y,1,656,3,1,StoneBr,2005", + "1535,Y,1,532,2,1,NridgHt,2004", + "1496,Y,1,612,2,1,SawyerW,1983", + "943,Y,1,600,2,1,NAmes,1979", + "1728,Y,1,576,2,1,NAmes,1965", + "864,Y,1,288,1,1,NAmes,1959", + "846,Y,1,650,2,1,Timber,2009", + "774,Y,1,400,2,1,Gilbert,2000", + "576,Y,1,288,1,1,NAmes,1971", + "832,Y,1,336,1,1,NAmes,1947", + "877,Y,1,216,1,1,SWISU,1928", + "1320,Y,1,564,2,1,Sawyer,1966", + "703,Y,1,540,2,1,Somerst,2007", + "1050,N,0,352,1,1,OldTown,1954", + "1429,Y,1,572,2,1,NAmes,1960", + "2042,Y,1,1390,3,1,NridgHt,2008", + "816,Y,1,0,0,1,Edwards,1990", + "1521,Y,1,880,2,1,OldTown,1893", + "989,Y,1,240,1,1,BrkSide,1935", + "2028,Y,1,880,3,1,Timber,2005", + "838,Y,1,275,1,1,BrkSide,1918", + "860,Y,1,528,2,1,Sawyer,1982", + "1473,Y,1,452,1,1,ClearCr,1968", + "779,Y,0,308,1,1,BrkSide,1930", + "770,Y,1,520,2,1,CollgCr,2004", + "1728,Y,1,842,3,1,NridgHt,2008", + "816,Y,1,816,2,1,Mitchel,1982", + "848,Y,1,420,2,1,CollgCr,2003", + "924,Y,1,280,1,1,CollgCr,1975", + "1826,Y,1,758,3,1,StoneBr,2005", + "684,N,0,216,1,1,OldTown,1910", + "1402,Y,1,648,3,1,Blmngtn,2006", + "1647,Y,1,621,2,1,Crawfor,1977", + "716,Y,1,452,2,1,Blueste,1980", + "1058,Y,1,736,2,1,NridgHt,2005", + "780,Y,1,544,1,1,IDOTRR,1937", + "927,Y,1,506,2,1,SawyerW,1992", + "600,Y,1,480,2,1,Somerst,2004", + "1494,Y,1,530,2,1,CollgCr,2002", + "1186,Y,1,486,2,1,NAmes,1965", + "1040,Y,1,576,2,1,CollgCr,1996", + "1112,Y,1,230,1,1,Edwards,1948", + "1940,Y,1,380,2,1,Crawfor,1934", + "1029,Y,1,261,1,1,Sawyer,1961", + "1476,Y,1,736,3,1,CollgCr,2000", + "1032,Y,1,564,2,1,NWAmes,1978", + "1299,Y,1,531,2,1,CollgCr,2001", + "1120,Y,1,0,0,1,Mitchel,2007", + "630,Y,1,0,0,1,MeadowV,1972", + "1054,Y,1,480,2,1,NAmes,1963", + "807,Y,1,393,2,1,Gilbert,2002", + "832,Y,1,528,2,1,NAmes,1954", + "1828,Y,1,774,3,1,NridgHt,2007", + "1482,Y,1,749,3,1,Timber,2003", + 
"864,N,0,0,0,1,Edwards,1914", + "1548,Y,1,624,2,1,NWAmes,1974", + "980,Y,1,484,2,1,Sawyer,1977", + "756,Y,1,440,2,1,Somerst,2000", + "1012,Y,1,484,2,1,NWAmes,1972", + "1116,Y,1,440,2,1,NAmes,1962", + "1422,Y,1,286,1,1,NAmes,1960", + "1520,Y,1,364,1,1,NAmes,1955", + "1040,Y,1,504,2,1,NAmes,1969", + "1350,Y,1,520,2,1,NAmes,1964", + "1089,Y,0,240,1,1,OldTown,1880", + "1554,Y,1,627,2,1,NridgHt,2006", + "1411,Y,1,544,2,1,NWAmes,1977", + "1056,Y,1,260,1,1,NAmes,1954", + "1056,Y,1,576,2,1,OldTown,1980", + "1440,Y,0,0,0,1,SWISU,1914", + "800,N,0,0,0,0,BrkSide,1936", + "811,Y,0,256,1,1,OldTown,1954", + "796,Y,0,0,0,1,Edwards,1910", + "1567,Y,1,648,3,1,Blmngtn,2006", + "1518,Y,1,588,2,1,NridgHt,2003", + "1057,Y,1,650,2,1,Somerst,2001", + "2000,Y,1,538,2,1,NAmes,1972", + "780,Y,1,462,2,1,NWAmes,1969", + "1766,Y,1,478,3,1,Somerst,2009", + "981,Y,1,576,2,1,NAmes,1971", + "1048,Y,1,420,2,1,NAmes,1950", + "1094,Y,1,495,2,1,Edwards,1953", + "1051,Y,1,442,2,1,NAmes,1966", + "630,Y,1,0,0,1,MeadowV,1970", + "822,Y,1,562,2,1,Somerst,2007", + "755,Y,1,296,1,1,Edwards,1940", + "909,Y,1,512,2,1,CollgCr,1996", + "756,Y,1,216,1,1,IDOTRR,1906", + "2113,Y,1,839,3,1,NoRidge,1995", + "525,Y,1,264,1,1,BrDale,1971", + "1053,Y,1,312,1,1,NAmes,1959", + "851,Y,1,270,1,1,Crawfor,1931", + "912,Y,1,330,1,1,NAmes,1948", + "1486,Y,1,480,2,1,Edwards,1964", + "1142,Y,1,550,2,1,NWAmes,1976", + "1686,Y,1,711,3,1,NoRidge,1994", + "1392,Y,1,576,2,1,NAmes,1968", + "1181,Y,1,588,2,1,Edwards,1972", + "2097,Y,1,1134,3,1,Somerst,2005", + "1454,Y,1,504,2,1,Gilbert,2000", + "1465,Y,1,596,2,1,NAmes,1965", + "1679,Y,1,575,2,1,SawyerW,1994", + "1437,Y,1,576,2,1,Sawyer,1956", + "1180,Y,1,252,1,1,Crawfor,1922", + "738,Y,1,540,2,1,CollgCr,2005", + "697,N,1,300,1,1,Edwards,1925", + "1208,Y,1,546,2,1,Veenker,1977", + "1839,Y,1,416,2,1,Crawfor,1957", + "1136,Y,1,384,1,1,NAmes,1965", + "855,Y,1,440,2,1,NPkVill,1978", + "1095,N,1,779,3,1,OldTown,1900", + "792,Y,1,240,1,1,OldTown,1924", + "2046,Y,1,834,3,1,StoneBr,2008", + "988,Y,1,572,2,1,Sawyer,1961", + "923,Y,1,264,1,1,SawyerW,1980", + "848,Y,1,281,1,1,SWISU,1932", + "1291,Y,1,431,2,1,ClearCr,1996", + "1668,Y,1,702,3,1,CollgCr,2002", + "1195,Y,1,486,2,1,NoRidge,1998", + "1190,Y,1,577,2,1,StoneBr,1984", + "874,Y,1,578,2,1,Somerst,2007", + "551,Y,1,480,2,1,Somerst,2004", + "1419,Y,1,567,2,1,StoneBr,2007", + "1362,Y,1,460,2,1,NridgHt,2005", + "848,Y,1,420,2,1,CollgCr,2004", + "2444,Y,1,832,3,1,NoRidge,1994", + "1238,Y,1,628,2,1,Timber,1989", + "1073,Y,1,326,1,1,OldTown,1921", + "1067,Y,1,576,2,1,BrkSide,1936", + "1137,Y,1,551,2,1,Timber,1987", + "616,Y,1,205,1,1,BrkSide,1921", + "1148,Y,0,308,1,1,Edwards,1952", + "894,Y,1,336,1,1,Sawyer,1965", + "1391,Y,1,530,2,1,Somerst,2004", + "1800,Y,1,765,3,1,Timber,2002", + "1164,Y,1,528,2,1,NWAmes,1969", + "1264,Y,1,666,3,1,StoneBr,2006", + "1032,Y,1,672,2,1,OldTown,1900", + "1484,Y,1,606,2,1,CollgCr,2004", + "372,N,1,0,0,1,IDOTRR,1930", + "1824,Y,1,739,2,1,ClearCr,1971", + "1324,Y,1,550,2,1,NridgHt,2006", + "728,Y,1,400,2,1,Gilbert,2007", + "904,Y,1,408,1,1,Sawyer,1966", + "729,Y,1,0,0,1,BrkSide,1935", + "859,Y,1,384,1,1,IDOTRR,1900", + "1228,Y,1,472,2,1,StoneBr,1988", + "960,N,1,576,2,1,BrkSide,1970", + "725,Y,1,475,2,1,Sawyer,1976", + "1350,Y,1,478,2,1,NWAmes,1974", + "1576,Y,1,704,2,1,OldTown,1890", + "1178,Y,1,439,2,1,NWAmes,1973", + "1325,Y,1,983,3,1,NoRidge,1993", + "971,Y,1,300,1,1,CollgCr,1969", + "1742,Y,1,564,2,1,StoneBr,1985", + "848,Y,1,420,2,1,CollgCr,2004", + "864,Y,1,463,2,1,NAmes,1970", + "997,Y,1,548,1,1,Edwards,1954", + 
"1698,Y,1,768,3,1,Edwards,2007", + "864,Y,1,660,2,1,NAmes,1970", + "1680,Y,1,540,2,1,Crawfor,1988", + "1232,Y,1,632,2,1,CollgCr,2007", + "1776,Y,1,888,3,1,NAmes,1958", + "848,Y,0,539,2,1,IDOTRR,1925", + "1616,Y,1,608,2,1,StoneBr,1995", + "1146,Y,1,438,2,1,Timber,2003", + "1153,Y,1,541,2,1,CollgCr,1998", + "1144,Y,1,264,1,1,Sawyer,1961", + "948,Y,1,300,1,1,Sawyer,1968", + "880,N,1,320,2,1,OldTown,1914", + "1040,N,0,400,2,1,NAmes,1950", + "901,Y,1,800,3,1,Gilbert,2005", + "1200,Y,1,0,0,1,Edwards,1987", + "864,Y,1,572,2,1,CollgCr,2004", + "768,Y,1,360,2,1,OldTown,1910", + "912,Y,1,288,1,1,Sawyer,1961", + "1349,Y,1,539,2,1,SawyerW,2000", + "1464,Y,1,480,2,1,Sawyer,1963", + "1337,Y,1,462,2,1,StoneBr,1993", + "1175,Y,1,831,2,1,NWAmes,1976", + "980,Y,1,554,2,1,Gilbert,2000", + "1320,N,1,864,4,1,OldTown,1880", + "1720,Y,1,527,2,1,NoRidge,1996", + "1088,N,0,240,1,0,Edwards,1945", + "792,Y,1,0,0,1,OldTown,1910", + "660,Y,1,400,2,1,Gilbert,2003", + "1494,Y,1,576,2,1,CollgCr,1997", + "1038,Y,1,878,3,1,NridgHt,2005", + "1026,Y,1,440,2,1,NAmes,1969", + "742,Y,1,440,2,1,Somerst,1999", + "866,Y,1,578,2,1,CollgCr,2007", + "672,Y,1,440,2,1,Sawyer,1978", + "757,Y,1,440,2,1,Somerst,1999", + "1328,Y,1,752,3,1,NoRidge,1995", + "864,Y,1,300,1,1,NAmes,1959", + "1301,Y,1,440,2,1,BrkSide,1924", + "764,Y,1,614,2,1,Somerst,2009", + "1268,Y,1,856,3,1,NoRidge,1999", + "1494,Y,1,481,2,1,Veenker,1995", + "1506,Y,1,592,2,1,Somerst,2008", + "980,Y,1,496,2,1,NWAmes,1988", + "983,Y,1,423,1,1,Mitchel,1940", + "1836,Y,1,484,2,1,CollgCr,2004", + "1690,Y,1,841,3,1,StoneBr,2003", + "858,Y,1,576,2,1,Sawyer,1982", + "1220,Y,0,396,2,1,Edwards,1951", + "1117,Y,1,672,2,1,Edwards,1976", + "912,Y,0,275,1,1,NAmes,1958", + "1973,Y,1,895,3,1,NridgHt,2006", + "1204,Y,1,412,2,1,Mitchel,1998", + "1614,Y,1,865,3,1,CollgCr,2005", + "894,Y,1,440,2,1,Sawyer,1974", + "2020,Y,1,630,2,1,SawyerW,1977", + "1004,Y,1,504,2,1,Mitchel,1977", + "1253,Y,1,402,2,1,Gilbert,1995", + "810,Y,1,484,2,1,SawyerW,1992", + "1430,Y,1,605,2,1,CollgCr,2001", + "1110,Y,1,602,2,1,Mitchel,1978", + "742,Y,1,0,0,1,OldTown,1914", + "1342,Y,1,457,2,1,NWAmes,1967", + "966,Y,1,416,1,1,OldTown,1915", + "956,Y,1,618,2,1,SawyerW,2004", + "901,Y,1,281,1,1,OldTown,1954", + "976,Y,1,444,2,1,ClearCr,1966", + "1145,Y,1,397,2,1,Blmngtn,2005", + "1062,Y,1,539,2,1,Mitchel,1976", + "1127,Y,1,455,2,1,NoRidge,1996", + "1496,Y,1,474,2,1,Somerst,2007", + "1086,Y,1,409,2,1,Gilbert,1994", + "888,Y,1,476,2,1,SawyerW,1980", + "1285,Y,1,528,2,1,Sawyer,1977", + "773,Y,1,240,1,1,NAmes,1953", + "1966,Y,1,820,3,1,NridgHt,2008", + "981,Y,1,240,1,1,SWISU,1937", + "616,Y,1,603,2,1,ClearCr,1997", + "1196,Y,0,440,2,1,IDOTRR,1916", + "728,Y,1,410,2,1,CollgCr,2005", + "1734,Y,1,1020,3,1,NridgHt,2008", + "1128,Y,1,286,1,1,NAmes,1954", + "1428,Y,1,554,2,1,Somerst,2008", + "980,Y,1,384,1,1,NAmes,1967", + "1072,Y,1,528,2,1,BrkSide,1923", + "1086,Y,1,484,2,1,NAmes,1966", + "1075,N,0,360,2,1,OldTown,1898", + "1309,Y,1,484,2,1,NWAmes,1974", + "848,Y,1,420,2,1,CollgCr,2004", + "1044,N,0,504,2,1,IDOTRR,1952", + "1442,Y,1,301,1,1,NAmes,1958", + "686,Y,1,280,1,1,Edwards,1918", + "1661,Y,1,598,2,1,CollgCr,1998", + "1008,Y,1,275,1,1,NAmes,1954", + "1689,Y,1,857,3,1,Mitchel,2002", + "1052,Y,1,440,2,1,ClearCr,1971", + "1358,Y,1,484,2,1,NridgHt,2009", + "798,Y,1,595,2,1,CollgCr,2003", + "936,N,1,576,2,1,OldTown,1953", + "847,Y,1,433,2,1,Gilbert,2003", + "944,Y,1,240,1,1,SWISU,1940", + "1489,Y,1,776,2,1,Somerst,2006", + "2084,Y,1,1220,3,1,NridgHt,2007", + "784,Y,1,0,0,1,BrkSide,1924", + "1434,Y,1,527,2,1,SawyerW,2001", + 
"1160,Y,1,538,2,1,ClearCr,1967", + "520,Y,1,480,2,1,Somerst,2005", + "1392,Y,0,458,2,1,NAmes,1957", + "520,Y,1,480,2,1,Somerst,2005", + "941,Y,1,613,2,1,CollgCr,2003", + "1516,Y,1,472,2,1,NAmes,1964", + "1144,Y,1,456,2,1,NAmes,1961", + "1067,Y,1,436,2,1,Sawyer,1950", + "1559,Y,1,812,2,1,OldTown,1948", + "483,Y,1,264,1,1,BrDale,1973", + "1099,Y,1,352,1,1,CollgCr,1995", + "768,Y,1,240,1,1,Edwards,1946", + "810,N,1,400,1,1,SWISU,1925", + "958,Y,1,686,2,1,OldTown,1904", + "1165,Y,1,490,2,1,NAmes,1966", + "1800,N,1,0,0,1,NAmes,1961", + "876,Y,1,720,3,1,OldTown,1915", + "1701,Y,1,611,2,1,Sawyer,1975", + "1006,Y,1,425,2,1,SawyerW,1993", + "864,Y,1,338,1,1,CollgCr,1972", + "1307,Y,1,360,2,1,ClearCr,1908", + "1094,Y,1,512,2,1,Veenker,1976", + "848,Y,1,420,2,1,CollgCr,2003", + "1456,Y,1,400,2,1,Blmngtn,2003", + "918,Y,1,240,1,1,Crawfor,1941", + "1445,Y,1,645,2,1,NAmes,1964", + "1779,Y,1,454,2,1,Sawyer,1955", + "1040,Y,1,260,1,1,Sawyer,1962", + "1026,Y,1,576,2,1,CollgCr,1978", + "702,Y,1,343,2,1,Gilbert,1994", + "1370,Y,1,479,2,1,NWAmes,1976", + "1512,Y,1,619,2,1,NWAmes,1968", + "912,Y,1,216,1,1,Crawfor,1918", + "1039,Y,1,504,2,1,Sawyer,1965", + "1097,Y,1,480,2,1,SawyerW,1984", + "1148,Y,1,672,1,1,NAmes,1959", + "1372,Y,1,529,2,1,Somerst,2007", + "1002,Y,1,902,2,1,NAmes,1970", + "1646,Y,1,870,3,1,Timber,2006", + "1120,Y,1,544,2,1,Sawyer,1961", + "1547,Y,1,672,2,1,Gilbert,1948", + "1062,Y,1,574,2,1,SawyerW,1993", + "894,N,1,308,1,1,NAmes,1962", + "804,Y,1,523,2,1,CollgCr,1998", + "910,Y,1,414,2,1,NAmes,1953", + "1036,Y,1,288,1,1,NAmes,1949", + "676,Y,1,200,1,1,OldTown,1941", + "1184,Y,1,550,2,1,Somerst,2007", + "1040,Y,1,648,2,1,Mitchel,1963", + "1462,Y,1,738,3,1,NridgHt,2004", + "1155,Y,1,576,2,1,Sawyer,1961", + "864,Y,1,336,1,1,CollgCr,1978", + "1090,Y,1,450,2,1,Edwards,2005", + "1187,Y,1,400,2,1,Timber,1990", + "808,Y,1,389,2,1,Gilbert,1992", + "954,N,1,440,1,1,SWISU,1912", + "892,Y,1,288,1,1,NAmes,1967", + "1709,Y,1,506,2,1,Somerst,1999", + "1712,Y,1,588,2,1,NAmes,1959", + "872,Y,1,300,1,1,Edwards,1955", + "2217,Y,1,621,2,1,NAmes,1970", + "1505,Y,1,505,2,1,NAmes,1953", + "672,Y,1,576,2,1,NAmes,1949", + "918,Y,1,440,2,1,Sawyer,1978", + "1068,Y,1,264,1,1,Sawyer,1963", + "1383,Y,1,354,1,1,Sawyer,1954", + "1535,Y,1,400,2,1,SawyerW,1979", + "983,Y,1,483,2,1,NAmes,1963", + "951,N,1,327,1,1,IDOTRR,1936", + "1120,Y,1,528,2,1,SawyerW,1979", + "2364,Y,1,820,3,1,NridgHt,2009", + "1236,Y,1,288,1,1,Sawyer,1961", + "858,Y,1,684,1,1,NAmes,1971", + "1306,Y,1,756,1,1,NAmes,1957", + "807,Y,1,393,2,1,Gilbert,2003", + "1670,Y,1,690,3,1,Gilbert,2006", + "902,Y,1,288,1,1,Sawyer,1967", + "1063,Y,1,280,1,1,NAmes,1954", + "1636,Y,1,865,3,1,Somerst,2006", + "1020,Y,1,180,1,1,Crawfor,1936", + "902,Y,1,484,2,1,Mitchel,1983", + "742,Y,1,390,2,1,Gilbert,2005", + "1105,Y,0,480,2,1,NAmes,1960", + "1268,Y,1,252,1,1,CollgCr,1977", + "1015,N,1,450,1,1,BrkSide,1925", + "1001,N,0,871,3,1,Crawfor,1949", + "612,Y,1,528,2,1,Somerst,2009", + "546,Y,1,286,1,1,MeadowV,1970", + "480,N,0,308,1,1,IDOTRR,1949", + "1229,Y,0,284,1,1,Edwards,1956", + "912,Y,1,833,3,1,SawyerW,1991", + "1414,Y,1,601,2,1,NAmes,1958", + "936,Y,1,471,2,1,SawyerW,1994", + "1272,Y,1,0,0,1,Edwards,1900", + "1316,Y,1,397,2,1,Gilbert,2005", + "1617,Y,1,533,2,1,SawyerW,1993", + "1686,Y,1,612,2,1,NWAmes,1980", + "1126,Y,1,540,2,1,NWAmes,1977", + "1234,Y,1,656,3,1,NridgHt,2003", + "1098,Y,1,486,2,1,NWAmes,1968", + "1788,Y,1,522,2,1,CollgCr,2001", + "993,Y,1,642,2,1,Gilbert,1997", + "1466,Y,1,610,3,1,Timber,2007", + "925,Y,1,429,1,1,NAmes,1965", + 
"1905,Y,1,788,3,1,Somerst,2006", + "1500,Y,1,570,2,1,CollgCr,2004", + "2069,Y,1,505,2,1,NAmes,1960", + "747,Y,1,528,2,1,BrkSide,1926", + "1200,Y,1,555,2,1,SawyerW,2003", + "1113,Y,1,689,2,1,Somerst,2005", + "1391,Y,1,868,3,1,CollgCr,2006", + "1207,Y,1,349,1,1,ClearCr,1940", + "1728,Y,1,574,2,1,Mitchel,1976", + "1022,Y,1,390,2,1,Gilbert,1999", + "1440,Y,1,0,0,1,Edwards,1977", + "1632,Y,1,576,2,1,Mitchel,1967", + "1344,Y,0,525,2,1,Timber,1958", + "1188,Y,1,456,2,1,OldTown,1890", + "1144,Y,1,796,1,1,NAmes,1959", + "1629,Y,1,808,3,1,Timber,2002", + "936,Y,1,474,2,1,CollgCr,2002", + "1381,Y,1,676,2,1,NWAmes,1972", + "864,Y,1,720,2,1,NAmes,1950", + "965,Y,1,300,1,1,SawyerW,1965", + "768,Y,1,396,1,1,CollgCr,1972", + "1168,Y,1,530,2,1,Mitchel,1969", + "980,Y,1,0,0,1,Edwards,1975", + "979,N,0,492,2,1,Crawfor,1946", + "561,Y,1,462,2,1,Blueste,1980", + "1057,Y,1,576,2,1,Sawyer,1962", + "1337,Y,1,531,2,1,CollgCr,2003", + "696,Y,1,484,2,1,Somerst,1999", + "858,Y,1,0,0,1,BrkSide,1958", + "1542,Y,1,619,2,1,NWAmes,1977", + "804,Y,1,440,2,1,NPkVill,1976", + "1800,Y,1,702,2,1,CollgCr,2007", + "824,Y,1,510,2,1,Timber,2002", + "783,Y,1,393,2,1,Gilbert,2005", + "976,Y,1,256,1,1,Crawfor,1940", + "1098,Y,1,260,1,1,NAmes,1955", + "600,N,1,0,0,0,OldTown,1910", + "1095,Y,1,264,1,1,NAmes,1958", + "720,N,0,0,0,1,NAmes,1949", + "764,Y,1,474,2,1,NridgHt,2003", + "918,Y,1,264,1,1,SawyerW,1979", + "1428,Y,1,480,2,1,Somerst,2007", + "1136,N,1,532,2,1,BrkSide,1910", + "673,Y,1,490,2,1,Somerst,2000", + "869,Y,0,0,0,1,BrkSide,1923", + "1241,Y,1,569,2,1,Somerst,2006", + "894,Y,1,400,2,1,Edwards,1954", + "1121,Y,1,480,2,1,Sawyer,1963", + "999,Y,1,588,2,1,NAmes,1961", + "1276,Y,1,676,3,1,NoRidge,1998", + "1266,Y,1,388,2,1,Blmngtn,2007", + "1149,Y,1,779,2,1,CollgCr,2002", + "1302,Y,1,539,2,1,Mitchel,1977", + "1164,N,1,240,1,1,Edwards,1950", + "1001,Y,1,255,1,1,OldTown,1910", + "1940,Y,1,606,3,1,NridgHt,2009", + "1118,Y,1,551,2,1,NWAmes,1976", + "778,Y,1,614,2,1,Somerst,2006", + "1407,Y,1,870,3,1,NoRidge,1997", + "916,Y,1,424,2,1,OldTown,1882", + "1020,Y,1,440,2,1,NAmes,1964", + "750,Y,1,564,2,1,CollgCr,2005", + "1718,Y,1,786,3,1,NridgHt,2006", + "774,Y,1,305,1,1,BrkSide,1946", + "1050,Y,1,368,1,1,NAmes,1961", + "1442,Y,1,615,2,1,NWAmes,1970", + "1077,N,1,210,1,1,IDOTRR,1922", + "1208,Y,1,632,2,1,CollgCr,2006", + "944,N,0,528,2,1,Edwards,1952", + "691,Y,0,216,1,1,OldTown,1920", + "1574,Y,1,824,3,1,Somerst,2006", + "1680,Y,1,528,2,1,NWAmes,1976", + "1504,Y,1,457,2,1,Blmngtn,2005", + "985,Y,1,328,1,1,CollgCr,1977", + "1657,Y,1,484,2,1,NAmes,1970", + "546,Y,1,286,1,1,MeadowV,1970", + "1710,Y,1,550,2,1,Mitchel,2004", + "1008,Y,1,0,0,1,SWISU,1926", + "720,Y,1,312,1,1,Sawyer,1948", + "1664,N,1,0,0,1,Edwards,1965", + "900,Y,1,180,1,1,Crawfor,1923", + "1022,N,1,280,1,1,OldTown,1910", + "1082,Y,1,240,1,1,NAmes,1948", + "810,Y,1,528,2,1,NWAmes,2001", + "1504,Y,1,478,2,1,CollgCr,1996", + "1360,Y,1,565,2,1,StoneBr,1984", + "802,Y,1,402,2,1,Gilbert,1991", + "1506,Y,1,440,2,1,Blmngtn,2005", + "1132,Y,1,451,2,1,Edwards,2005", + "1220,Y,1,632,2,1,CollgCr,2006", + "912,Y,0,160,1,1,OldTown,1930", + "1504,Y,1,437,2,1,Blmngtn,2005", + "2898,Y,1,665,2,1,Timber,1976", + "882,Y,1,461,2,1,CollgCr,1972", + "1264,Y,1,461,2,1,NAmes,1960", + "1646,Y,1,800,3,1,Timber,2007", + "968,Y,1,240,1,1,NAmes,1941", + "672,Y,1,264,1,1,BrDale,1972", + "948,Y,1,0,0,1,SWISU,1916", + "1687,Y,1,672,2,1,SWISU,1920", + "1352,Y,1,796,3,1,NoRidge,1993", + "1654,Y,1,900,3,1,CollgCr,2002", + "954,Y,1,240,1,1,Crawfor,1938", + "845,N,1,290,1,1,Edwards,1957", + 
"1620,Y,1,912,3,1,Timber,2007", + "1055,Y,1,905,2,1,CollgCr,2001", + "798,Y,1,0,0,1,MeadowV,1970", + "630,Y,1,286,1,1,MeadowV,1970", + "1803,Y,1,484,2,1,Sawyer,1957", + "800,Y,1,484,2,1,NWAmes,1966", + "1306,Y,1,624,2,1,NridgHt,2005", + "1532,Y,1,514,2,1,SawyerW,1990", + "2524,Y,1,542,2,1,NWAmes,1981", + "1733,Y,0,452,2,1,Edwards,1955", + "1992,Y,1,716,3,1,StoneBr,2005", + "990,Y,1,672,2,1,CollgCr,1994", + "1771,Y,1,336,2,1,Mitchel,1960", + "930,Y,1,308,1,1,Edwards,1946", + "1302,Y,1,436,2,1,Gilbert,2007", + "1316,Y,1,440,2,1,CollgCr,2007", + "1127,Y,1,540,2,1,NAmes,1964", + "1526,Y,0,364,1,1,Edwards,1957", + "1091,Y,1,586,2,1,CollgCr,2002", + "1523,Y,1,478,2,1,NWAmes,1976", + "1364,Y,1,484,2,1,NridgHt,2005", + "979,Y,1,467,2,1,NoRidge,1994", + "1130,Y,1,836,3,1,NridgHt,2008", + "1096,Y,1,432,2,1,Crawfor,1932", + "1338,Y,1,582,2,1,StoneBr,2001", + "894,Y,1,1248,3,1,IDOTRR,1935", + "1422,N,1,560,2,1,OldTown,1900", + "1103,Y,1,440,2,1,OldTown,1925", + "1154,Y,1,480,2,1,NAmes,1966", + "1306,Y,1,533,2,1,ClearCr,1996", + "799,Y,1,380,2,1,Gilbert,1993", + "798,Y,1,442,2,1,NAmes,1964", + "1291,Y,1,576,2,1,MeadowV,1973", + "893,Y,1,576,2,1,NAmes,1949", + "1048,Y,1,286,1,1,NAmes,1956", + "829,Y,1,441,2,1,NWAmes,1968", + "1002,Y,0,280,1,1,Edwards,1948", + "698,Y,1,440,2,1,Edwards,1977", + "1240,Y,1,826,3,1,CollgCr,2006", + "960,Y,1,240,1,1,Crawfor,1940", + "1096,Y,1,566,2,1,OldTown,1936", + "1096,Y,1,299,1,1,NAmes,1969", + "848,Y,1,420,2,1,CollgCr,2004", + "990,Y,1,299,1,1,CollgCr,1994", + "1258,Y,1,528,2,1,NWAmes,1971", + "1040,Y,1,308,1,1,Sawyer,1963", + "1459,Y,1,527,2,1,CollgCr,2002", + "1251,Y,1,461,1,1,NAmes,1964", + "691,Y,1,409,2,1,Gilbert,1995", + "996,Y,1,564,2,1,SawyerW,1992", + "546,Y,1,286,1,1,MeadowV,1973", + "1082,Y,1,1043,3,1,Somerst,2005", + "970,Y,1,380,2,1,Edwards,2004", + "1247,Y,1,550,2,1,Somerst,2005", + "1040,N,0,400,2,0,NAmes,1950", + "624,Y,1,462,2,1,Somerst,1999", + "1390,Y,1,576,2,1,SWISU,1925", + "1200,Y,1,884,2,1,NAmes,1965", + "936,Y,1,308,1,1,NAmes,1956", + "1314,Y,1,440,2,1,CollgCr,2006", + "773,Y,1,0,0,1,IDOTRR,1914", + "1088,Y,1,461,2,1,StoneBr,1986", + "757,Y,1,240,1,1,BrkSide,1936", + "1601,Y,1,478,2,1,NWAmes,1978", + "438,N,0,246,1,1,SWISU,1920", + "950,Y,1,280,1,1,NAmes,1971", + "1134,Y,1,254,1,1,NAmes,1960", + "1194,Y,1,539,2,1,NAmes,1959", + "630,Y,1,440,2,1,BrDale,1970", + "1500,Y,1,712,2,1,NoRidge,1994", + "1442,Y,1,719,2,1,SawyerW,1990", + "887,Y,1,422,2,1,Gilbert,2006", + "948,Y,1,463,2,1,Gilbert,2000", + "1836,Y,1,862,3,1,NridgHt,2004", + "773,Y,1,431,2,1,Gilbert,1995", + "1098,Y,1,483,2,1,NWAmes,1976", + "816,Y,0,308,1,1,NAmes,1957", + "1008,Y,1,240,1,1,NAmes,1953", + "833,Y,1,326,1,1,OldTown,1954", + "1734,Y,1,928,3,1,NridgHt,2007", + "779,Y,1,527,2,1,Gilbert,2002", + "894,Y,1,450,2,1,Sawyer,1967", + "1021,Y,1,300,1,1,Sawyer,1958", + "1040,Y,0,286,1,1,NAmes,1959", + "1012,Y,0,308,1,1,IDOTRR,1920", + "1552,Y,1,782,3,1,CollgCr,2005", + "960,Y,0,288,1,1,Edwards,1956", + "698,Y,1,0,0,1,Edwards,1947", + "812,Y,1,392,2,1,Gilbert,1992", + "1005,Y,1,672,2,1,NAmes,1955", + "1555,Y,1,660,3,1,Blmngtn,2007", + "1530,Y,1,630,3,1,Gilbert,2004", + "847,Y,1,434,2,1,Gilbert,2004", + "936,Y,1,672,2,1,OldTown,1980", + "1328,Y,1,576,2,1,SWISU,1928", + "974,Y,1,0,0,1,Mitchel,1991", + "1178,Y,1,205,1,1,OldTown,1880", + "1142,Y,1,466,2,1,SawyerW,1995", + "916,Y,1,460,2,1,Gilbert,1997", + "986,N,1,180,1,1,BrkSide,1926", + "1032,Y,0,288,1,1,NAmes,1950", + "780,N,1,0,0,1,Sawyer,1875", + "1567,Y,1,714,2,1,Mitchel,1977", + "1167,Y,1,495,2,1,BrkSide,1920", + 
"952,Y,1,840,2,1,NAmes,1951", + "1088,Y,1,484,2,1,NWAmes,1976", + "1466,Y,1,1052,3,1,CollgCr,2006", + "1006,Y,1,0,0,1,Sawyer,1959", + "672,N,0,280,1,0,Edwards,1941", + "1042,Y,1,225,1,1,BrkSide,1928", + "1298,Y,1,403,2,1,SawyerW,1985", + "860,Y,1,234,1,1,Crawfor,1941", + "572,Y,1,288,1,1,OldTown,1926", + "832,Y,1,324,2,1,OldTown,1920", + "932,Y,0,306,1,1,NAmes,1950", + "1466,Y,1,528,2,1,Edwards,1959", + "1811,Y,1,470,2,1,Crawfor,1956", + "816,Y,1,432,1,1,IDOTRR,1930", + "902,Y,1,492,2,1,NAmes,1965", + "1437,Y,1,528,2,1,Veenker,1976", + "1265,Y,1,502,2,1,NAmes,1965", + "1314,Y,1,626,2,1,NridgHt,2007", + "1580,Y,1,830,3,1,Somerst,2007", + "943,Y,1,540,2,1,NWAmes,1974", + "855,Y,1,440,2,1,NPkVill,1978", + "1640,Y,1,924,2,1,Crawfor,1954", + "894,Y,1,450,2,1,Sawyer,1968", + "1258,Y,1,400,2,1,Sawyer,1969", + "1432,Y,1,588,2,1,Veenker,1978", + "1502,Y,1,644,2,1,NridgHt,2009", + "1694,Y,1,776,3,1,CollgCr,2008", + "959,Y,1,472,2,1,Gilbert,2000", + "1236,Y,1,540,2,1,Edwards,1935", + "1831,Y,1,807,3,1,NoRidge,1995", + "1118,Y,1,358,1,1,Mitchel,1977", + "1261,Y,1,433,2,1,NAmes,1958", + "625,Y,1,625,2,1,Somerst,2006", + "1636,Y,1,0,0,1,ClearCr,1946", + "1170,Y,1,360,2,1,Crawfor,1932", + "2129,Y,1,541,2,1,NoRidge,1992", + "923,Y,1,264,1,1,Mitchel,1984", + "818,Y,1,210,1,1,OldTown,1926", + "820,Y,1,186,1,1,Crawfor,1921", + "1124,Y,1,0,0,1,Edwards,1954", + "1298,Y,1,693,2,1,Timber,1990", + "1652,Y,1,482,2,1,Crawfor,2008", + "2411,Y,1,813,3,1,NoRidge,1996", + "1130,Y,1,720,2,1,OldTown,1920", + "1572,Y,1,995,3,1,Timber,1963", + "949,Y,1,392,1,1,Edwards,1924", + "1014,Y,1,420,2,1,OldTown,1900", + "1624,Y,1,757,3,1,NoRidge,1994", + "831,Y,1,493,2,1,CollgCr,2002", + "1028,Y,1,442,2,1,Gilbert,1999", + "1622,Y,1,1356,4,1,Mitchel,1961", + "764,Y,1,492,2,1,Somerst,1999", + "842,Y,1,250,1,1,OldTown,1925", + "1224,Y,1,402,2,1,Mitchel,1999", + "663,Y,1,299,1,1,Sawyer,1969", + "728,Y,1,400,2,1,Gilbert,2005", + "879,Y,1,660,3,1,Gilbert,2006", + "815,Y,1,225,1,1,OldTown,1916", + "1212,Y,1,573,2,1,CollgCr,2001", + "1382,Y,1,459,2,1,Sawyer,1963", + "864,Y,1,280,1,1,NAmes,1970", + "866,Y,1,546,2,1,CollgCr,1998", + "884,Y,1,216,1,1,BrkSide,1925", + "1630,Y,1,451,2,1,CollgCr,2000", + "1074,Y,1,495,2,1,NWAmes,1975", + "2196,Y,1,701,3,1,SawyerW,1990", + "1056,Y,1,384,1,1,SawyerW,1966", + "1700,Y,1,544,2,1,CollgCr,2003", + "1283,Y,1,506,2,1,NAmes,1962", + "1660,Y,1,500,2,1,Somerst,2006", + "1055,Y,1,462,2,1,SawyerW,1992", + "1080,Y,1,492,2,1,Gilbert,1988", + "672,Y,1,234,1,1,Edwards,1941", + "960,Y,1,364,1,1,Sawyer,1965", + "999,Y,1,300,1,1,NAmes,1962", + "894,Y,1,384,1,1,Sawyer,1966", + "1318,Y,1,539,2,1,Sawyer,1978", + "1314,Y,1,552,2,1,Somerst,2009", + "672,N,1,0,0,1,BrkSide,1947", + "672,Y,1,0,0,1,BrDale,1971", + "912,Y,1,288,1,1,NAmes,1964", + "1211,Y,1,322,1,1,Sawyer,1968", + "1168,Y,1,315,1,1,NAmes,1949", + "2136,N,0,528,2,1,NAmes,1951", + "788,Y,1,388,2,1,Gilbert,2004", + "1138,Y,1,264,1,1,NAmes,1958", + "894,Y,1,668,3,1,Somerst,2007", + "912,Y,1,576,2,1,NAmes,1965", + "1702,Y,1,1052,3,1,NridgHt,2008", + "1507,Y,1,404,1,1,NAmes,1960", + "1361,Y,1,600,2,1,Sawyer,1977", + "1190,Y,1,540,2,1,NAmes,1962", + "1224,Y,1,462,2,1,NAmes,1962", + "1188,Y,1,531,2,1,NAmes,1959", + "1024,N,0,0,0,1,SWISU,1911", + "892,Y,0,180,1,1,Crawfor,1914", + "764,Y,1,474,2,1,NridgHt,2003", + "847,Y,1,434,2,1,CollgCr,2004", + "1141,Y,1,484,2,1,SawyerW,2005", + "1484,Y,1,472,2,1,Timber,2006", + "884,Y,1,543,2,1,CollgCr,2003", + "1689,Y,1,954,3,1,Somerst,2007", + "1173,Y,1,528,2,1,NWAmes,1974", + "2076,Y,1,850,3,1,NridgHt,2006", + 
"792,Y,1,400,2,1,Crawfor,1929", + "1140,Y,1,477,2,1,NWAmes,1984", + "756,Y,1,615,2,1,Somerst,2005", + "1034,Y,1,888,3,1,Mitchel,1976", + "1134,N,1,396,2,1,OldTown,1917", + "988,Y,1,276,1,1,NAmes,1950", + "2110,Y,1,522,2,1,NAmes,1968", + "1405,Y,1,478,2,1,NridgHt,2003", + "874,Y,1,288,1,1,Mitchel,1968", + "1516,Y,1,518,2,1,Veenker,1974", + "760,Y,1,397,2,1,Edwards,2003", + "959,Y,0,560,1,1,BrkSide,1931", + "1987,Y,1,691,2,1,NoRidge,1994", + "864,Y,1,0,0,1,Edwards,1922", + "1166,Y,1,400,2,1,Gilbert,2005", + "1054,Y,1,460,2,1,NAmes,1969", + "892,Y,1,502,2,1,Gilbert,1999", + "1050,Y,1,338,1,1,NAmes,1956", + "1104,Y,0,304,1,1,ClearCr,1957", + "1060,Y,0,520,2,1,BrkSide,1919", + "1337,Y,1,511,2,1,NAmes,1998", + "713,Y,1,506,2,1,Somerst,1999", + "964,N,0,308,1,1,OldTown,1910", + "2018,Y,1,746,3,1,Timber,2008", + "1968,Y,1,1014,3,1,Crawfor,1935", + "874,Y,0,315,1,1,NAmes,1958", + "1332,Y,1,586,2,1,ClearCr,1979", + "1489,Y,1,462,2,1,NWAmes,1968", + "935,Y,1,288,1,1,Sawyer,1965", + "1357,Y,1,312,1,1,Edwards,1959", + "661,Y,1,552,2,1,Crawfor,1910", + "928,Y,0,400,2,1,NAmes,1948", + "735,Y,1,497,2,1,NWAmes,1972", + "1724,Y,1,480,2,1,NWAmes,1967", + "1128,Y,1,577,2,1,CollgCr,2002", + "698,Y,0,528,2,1,IDOTRR,1920", + "1573,Y,1,544,2,1,CollgCr,2002", + "1339,Y,1,484,2,1,Timber,1990", + "1040,Y,1,484,2,1,CollgCr,1977", + "912,Y,1,0,0,1,Mitchel,1971", + "1699,Y,1,336,1,1,SWISU,1919", + "825,Y,0,280,1,1,BrkSide,1939", + "1328,Y,1,528,2,1,NWAmes,1963", + "1582,Y,1,390,2,1,ClearCr,1964", + "1659,Y,1,499,2,1,StoneBr,2000", + "1120,Y,1,753,3,1,NridgHt,2006", + "1152,Y,1,484,2,1,NAmes,1964", + "630,Y,1,264,1,1,BrDale,1972", + "1378,N,1,432,1,1,OldTown,1892", + "832,Y,1,528,2,1,NWAmes,1976", + "864,Y,1,572,2,1,Edwards,1955", + "1052,Y,1,288,1,1,NAmes,1968", + "1128,Y,1,525,2,1,NAmes,1963", + "1072,Y,1,525,2,1,Edwards,2005", + "4692,Y,1,1418,2,1,Edwards,2008", + "1246,Y,1,305,1,1,NAmes,1959", + "1005,Y,1,490,2,1,Gilbert,1999", + "753,Y,1,213,1,1,Crawfor,1942", + "1203,Y,1,844,3,1,NoRidge,1994", + "1616,Y,1,834,3,1,Somerst,2005", + "976,Y,1,380,2,1,Edwards,2004", + "1652,Y,1,840,2,1,NridgHt,2006", + "1368,Y,1,474,2,1,NridgHt,2005", + "990,Y,1,480,2,1,CollgCr,1994", + "1122,Y,1,528,2,1,OldTown,1948", + "1294,Y,1,496,2,1,SawyerW,1991", + "1902,Y,1,567,2,1,Crawfor,1959", + "1274,Y,1,508,2,1,CollgCr,2005", + "1453,Y,1,750,2,1,NoRidge,1990", + "1422,Y,1,779,3,1,NoRidge,1999", + "948,Y,1,280,1,1,Edwards,1954", + "1092,Y,1,576,2,1,NAmes,1969", + "1630,Y,1,860,3,1,CollgCr,2008", + "1352,Y,1,466,2,1,Somerst,2006", + "1787,Y,1,748,3,1,CollgCr,2001", + "948,Y,1,248,1,1,Edwards,1954", + "1478,Y,1,442,2,1,NAmes,1957", + "720,N,1,287,1,0,BrkSide,1949", + "1061,Y,1,564,2,1,NoRidge,1992", + "708,Y,1,0,0,1,BrkSide,1940", + "1795,Y,1,895,3,1,Somerst,2006", + "796,N,1,0,0,1,IDOTRR,1922", + "774,Y,1,0,0,1,Edwards,1931", + "816,Y,1,264,1,1,Mitchel,1982", + "1584,Y,1,520,2,1,OldTown,1920", + "955,Y,1,462,2,1,Gilbert,1998", + "1588,Y,1,825,3,1,Somerst,2006", + "954,Y,1,576,2,1,CollgCr,1976", + "816,Y,1,288,1,1,Edwards,1938", + "803,Y,1,297,1,1,IDOTRR,1938", + "765,Y,1,440,2,1,BrDale,1970", + "1334,Y,1,630,2,1,NWAmes,1977", + "1656,Y,1,506,2,1,NWAmes,1973", + "693,N,0,0,0,0,OldTown,1941", + "920,Y,1,492,2,1,CollgCr,2002", + "864,Y,1,288,1,1,CollgCr,1972", + "872,Y,1,480,4,1,NAmes,1971", + "1114,Y,1,576,2,1,SawyerW,2003", + "1284,Y,1,647,2,1,CollgCr,2002", + "1172,Y,1,342,2,1,Crawfor,1928", + "728,Y,1,440,2,1,CollgCr,2006", + "960,Y,0,308,1,1,OldTown,1920", + "2156,Y,1,508,2,1,NWAmes,1968", + "1776,Y,1,712,3,1,Timber,2006", + 
"1494,Y,1,514,2,1,SawyerW,1998", + "938,N,1,0,0,1,OldTown,1872", + "1338,Y,1,968,4,1,NAmes,1969", + "858,Y,1,490,2,1,NAmes,1962", + "786,Y,1,624,2,1,BrkSide,1937", + "2053,Y,1,666,3,1,NoRidge,1995", + "992,Y,1,839,3,1,CollgCr,2000", + "1222,Y,1,487,2,1,NWAmes,1968", + "892,Y,1,264,1,1,NAmes,1966", + "1078,Y,1,500,2,1,NAmes,1971", + "769,Y,1,440,2,1,Somerst,2000", + "1980,Y,1,770,3,1,NridgHt,2004", + "990,Y,1,621,2,1,SWISU,1921", + "1530,Y,1,430,2,1,StoneBr,2005", + "1281,Y,1,368,1,1,NAmes,1920", + "616,Y,1,432,2,1,Gilbert,2006", + "520,Y,1,480,2,1,Somerst,2005", + "814,Y,1,663,2,1,Somerst,2000", + "882,Y,1,588,2,1,CollgCr,1999", + "925,Y,1,336,1,1,MeadowV,1977", + "848,Y,1,420,2,1,CollgCr,2003", + "1668,Y,1,502,2,1,CollgCr,2003", + "840,Y,1,338,1,1,OldTown,1920", + "1661,Y,1,377,1,1,NAmes,1955", + "1108,Y,1,583,2,1,CollgCr,1998", + "2633,Y,1,804,3,1,NoRidge,2001", + "1026,Y,1,936,3,1,Somerst,2005", + "1571,Y,1,722,3,1,Timber,2007", + "790,N,1,160,1,1,SWISU,1930", + "984,Y,1,660,2,1,Edwards,1941", + "483,Y,1,264,1,1,BrDale,1973", + "754,Y,1,400,2,1,Timber,2006", + "864,N,0,200,1,1,Edwards,1914", + "2117,Y,1,550,2,1,NAmes,1970", + "998,Y,1,576,2,1,OldTown,1920", + "1416,Y,1,576,2,1,Sawyer,1918", + "698,Y,1,280,1,1,Edwards,1939", + "796,Y,1,240,1,1,IDOTRR,1922", + "1392,Y,1,564,2,1,NWAmes,1978", + "1664,N,1,216,1,1,OldTown,1916", + "1746,Y,1,758,3,1,Gilbert,2006", + "869,Y,1,440,2,1,BrkSide,1941", + "1525,Y,1,541,2,1,CollgCr,2000", + "1584,Y,1,792,3,1,NAmes,1967", + "900,Y,1,288,1,1,NAmes,1967", + "1221,N,1,672,2,1,OldTown,1905", + "1500,Y,1,648,3,1,Blmngtn,2006", + "1133,Y,1,642,3,1,Timber,2005", + "1687,Y,1,572,2,1,Timber,1948", + "939,Y,1,180,1,1,BrkSide,1920", + "1136,Y,0,240,1,1,NAmes,1950", + "1160,Y,1,216,1,1,SWISU,1925", + "950,Y,1,208,1,1,BrkSide,1929", + "864,Y,1,398,2,1,Gilbert,2004", + "1294,Y,1,662,2,1,CollgCr,2006", + "1464,Y,1,754,3,1,Somerst,2007", + "694,Y,1,936,3,1,OldTown,1915", + "1646,Y,1,482,2,1,Crawfor,2004", + "768,Y,1,396,2,1,CollgCr,1972", + "833,Y,1,0,0,1,Mitchel,1985", + "741,Y,1,528,2,1,OldTown,1910", + "1236,Y,1,542,2,1,NWAmes,1986", + "944,Y,1,622,2,1,CollgCr,2001", + "1112,Y,1,271,1,1,NAmes,1950", + "1040,N,0,420,2,0,NAmes,1949", + "1844,Y,1,620,2,1,SawyerW,2005", + "1053,Y,1,370,2,1,BrkSide,1923", + "1569,Y,1,660,3,1,Blmngtn,2007", + "1246,Y,1,560,2,1,OldTown,1885", + "1310,Y,1,1069,3,1,NoRidge,1998", + "1144,Y,1,336,1,1,NAmes,1963", + "1844,Y,1,540,2,1,Crawfor,1969", + "708,Y,1,776,2,1,NWAmes,1968", + "1069,Y,1,440,2,1,NPkVill,1977", + "848,Y,1,420,2,1,CollgCr,2003", + "1575,Y,1,432,2,1,Edwards,1966", + "1344,Y,1,484,1,1,NAmes,1958", + "1252,Y,1,528,2,1,NAmes,1959", + "1223,Y,1,525,2,1,NoRidge,1994", + "1048,Y,0,288,1,1,NAmes,1945", + "804,Y,1,240,1,1,OldTown,1940", + "1440,Y,1,467,2,1,NWAmes,1981", + "734,Y,1,372,2,1,Gilbert,2005", + "958,Y,1,440,2,1,NPkVill,1976", + "968,Y,1,216,1,1,OldTown,1927", + "962,Y,1,451,2,1,Gilbert,2000", + "1126,Y,1,484,2,1,Mitchel,1977", + "1537,Y,1,462,2,1,NAmes,1962", + "864,Y,1,528,2,1,NAmes,1971", + "1932,Y,1,774,3,1,NridgHt,2008", + "1236,Y,0,923,2,1,OldTown,1957", + "1040,Y,1,550,2,1,NWAmes,1979", + "1423,Y,1,672,2,1,Crawfor,1922", + "848,Y,1,420,2,1,CollgCr,2004", + "1026,Y,1,812,3,1,Somerst,2008", + "952,N,0,192,1,0,BrkSide,1916", + "1422,Y,1,626,2,1,CollgCr,2004", + "913,Y,1,240,1,1,Sawyer,1966", + "1188,Y,1,312,1,1,Mitchel,1962", + "1220,Y,1,556,2,1,CollgCr,1995", + "796,N,1,384,1,1,Edwards,1910", + "630,Y,1,0,0,1,MeadowV,1970", + "896,Y,1,0,0,1,NAmes,1974", + "1578,Y,1,840,3,1,Somerst,2008", + 
"1072,Y,1,525,2,1,Edwards,2005", + "1140,Y,1,0,0,1,Mitchel,2006", + "1221,Y,1,400,2,1,Somerst,2004", + "953,Y,1,460,2,1,Gilbert,1999", + "2073,Y,1,500,2,1,NWAmes,1978", + "1188,Y,1,252,1,1,Crawfor,1941", + "1078,Y,0,240,1,1,NAmes,1950", + "1256,Y,1,276,1,1,Edwards,1965" + ); + + static final String TARGET_FIELD = "CentralAir"; + + private String jobId; + private String sourceIndex; + private String destIndex; + + @Before + public void setupLogging() { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put("logger.org.elasticsearch.xpack.ml.process", "DEBUG") + .put("logger.org.elasticsearch.xpack.ml.dataframe", "DEBUG") + ) + .get(); + } + + @After + public void cleanup() { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .putNull("logger.org.elasticsearch.xpack.ml.process") + .putNull("logger.org.elasticsearch.xpack.ml.dataframe") + ) + .get(); + cleanUp(); + } + + public void testFeatureImportanceValues() throws Exception { + initialize("classification_house_pricing_test_feature_importance_values"); + indexData(sourceIndex); + DataFrameAnalyticsConfig config = buildAnalytics( + jobId, + sourceIndex, + destIndex, + null, + new Classification( + TARGET_FIELD, + BoostedTreeParams.builder().setNumTopFeatureImportanceValues(5).build(), + null, + null, + null, + 35.0, + null, + null, + null + ) + ); + + putAnalytics(config); + + assertIsStopped(jobId); + assertProgressIsZero(jobId); + + startAnalytics(jobId); + waitUntilAnalyticsIsStopped(jobId); + + client().admin().indices().refresh(new RefreshRequest(destIndex)); + SearchResponse sourceData = client().prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000).get(); + for (SearchHit hit : sourceData.getHits()) { + Map destDoc = getDestDoc(config, hit); + Map resultsObject = getFieldValue(destDoc, "ml"); + @SuppressWarnings("unchecked") + List> importanceArray = (List>) resultsObject.get("feature_importance"); + assertThat(importanceArray, hasSize(greaterThan(0))); + } + + } + + static void indexData(String sourceIndex) { + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (String row : DATA) { + String[] values = row.split(","); + List source = List.of( + "1stFlrSF", + Integer.valueOf(values[0]), + "CentralAir", + values[1], + "Electrical", + values[2], + "GarageArea", + Integer.valueOf(values[3]), + "GarageCars", + Integer.valueOf(values[4]), + "Heating", + values[5], + "Neighborhood", + values[6], + "YearBuilt", + Integer.valueOf(values[7]) + ); + IndexRequest indexRequest = new IndexRequest(sourceIndex).source(source.toArray()).opType(DocWriteRequest.OpType.CREATE); + bulkRequestBuilder.add(indexRequest); + } + BulkResponse bulkResponse = bulkRequestBuilder.get(); + if (bulkResponse.hasFailures()) { + fail("Failed to index data: " + bulkResponse.buildFailureMessage()); + } + } + + private void initialize(String jobId) { + this.jobId = jobId; + this.sourceIndex = jobId + "_source_index"; + this.destIndex = sourceIndex + "_results"; + createIndex(sourceIndex); + } + + static void createIndex(String index) { + String mapping = """ + { + "properties": { + "1stFlrSF": { + "type": "integer" + }, + "CentralAir": { + "type": "keyword" + }, + "Electrical": { + "type": "keyword" + }, + "GarageArea": { + "type": "integer" + }, + "GarageCars": { + "type": "integer" + }, + "Heating": { + "type": "keyword" + }, + "Neighborhood": { + "type": "keyword" + }, + 
"YearBuilt": { + "type": "integer" + } + } + }"""; + + client().admin().indices().prepareCreate(index).setMapping(mapping).get(); + } + + @Override + boolean supportsInference() { + return true; + } + + private static Map getDestDoc(DataFrameAnalyticsConfig config, SearchHit hit) { + GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get(); + assertThat(destDocGetResponse.isExists(), is(true)); + Map sourceDoc = hit.getSourceAsMap(); + Map destDoc = destDocGetResponse.getSource(); + for (String field : sourceDoc.keySet()) { + assertThat(destDoc, hasKey(field)); + assertThat(destDoc.get(field), equalTo(sourceDoc.get(field))); + } + return destDoc; + } +} From 5a26455170ec3657e65aed0fbd6d39a8a9a3e3ce Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 12 Aug 2022 08:28:31 -0500 Subject: [PATCH 189/265] Adding a check to the master stability health API when there is no master and the current node is not master eligible (#89219) This PR builds on #86524, #87482, and #87306 by supporting the case where there has been no master node in the last 30 second, no node has been elected master, and the current node is not master eligible. --- docs/changelog/89219.yaml | 6 + .../CoordinationDiagnosticsServiceIT.java | 77 ++++++++++++- .../discovery/StableMasterDisruptionIT.java | 45 -------- .../CoordinationDiagnosticsService.java | 107 +++++++++++++++++- .../CoordinationDiagnosticsServiceTests.java | 91 +++++++++++++++ .../AbstractCoordinatorTestCase.java | 1 + 6 files changed, 274 insertions(+), 53 deletions(-) create mode 100644 docs/changelog/89219.yaml diff --git a/docs/changelog/89219.yaml b/docs/changelog/89219.yaml new file mode 100644 index 0000000000000..010c1d056ea1c --- /dev/null +++ b/docs/changelog/89219.yaml @@ -0,0 +1,6 @@ +pr: 89219 +summary: Adding a check to the master stability health API when there is no master + and the current node is not master eligible +area: Health +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java index 4cc4589d71350..93878f8b66fc7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -199,8 +200,7 @@ public void testNoQuorumSeenFromNonMasterNodes() throws Exception { .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) .build() ); - internalCluster().getInstances(CoordinationDiagnosticsService.class) - .forEach(coordinationDiagnosticsService -> CoordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO); + CoordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO; ensureStableCluster(5); String firstMasterNode = internalCluster().getMasterName(); List nonActiveMasterNodes = masterNodes.stream().filter(nodeName -> firstMasterNode.equals(nodeName) == false).toList(); @@ -266,6 +266,7 @@ public 
boolean validateClusterForming() { CoordinationDiagnosticsService.class, randomMasterNodeName ); + CoordinationDiagnosticsService.CoordinationDiagnosticsResult result = diagnosticsOnMasterEligibleNode.diagnoseMasterStability( true ); @@ -293,4 +294,76 @@ public boolean validateClusterForming() { internalCluster().stopNode(dataNodeName); // This is needed for the test to clean itself up happily } } + + public void testNoQuorum() throws Exception { + /* + * In this test we have three master-eligible nodes and two data-only nodes. We make it so that the two non-active + * master-eligible nodes cannot communicate with each other but can each communicate with one data-only node, and then we + * stop the active master node. Now there is no quorum so a new master cannot be elected. We set the master lookup threshold very + * low on the data nodes, so when we run the master stability check on each of the master nodes, it will see that there has been no + * master recently and because there is no quorum, so it returns a RED status. We also check that each of the data-only nodes + * reports a RED status because there is no quorum (having polled that result from the master-eligible node it can communicate + * with). + */ + CoordinationDiagnosticsService.remoteRequestInitialDelay = TimeValue.ZERO; + var settings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) + .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO) + .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) + .build(); + var masterNodes = internalCluster().startMasterOnlyNodes(3, settings); + var dataNodes = internalCluster().startDataOnlyNodes(2, settings); + ensureStableCluster(5); + String firstMasterNode = internalCluster().getMasterName(); + List nonActiveMasterNodes = masterNodes.stream().filter(nodeName -> firstMasterNode.equals(nodeName) == false).toList(); + NetworkDisruption networkDisconnect = new NetworkDisruption( + new NetworkDisruption.TwoPartitions( + Set.of(nonActiveMasterNodes.get(0), dataNodes.get(0)), + Set.of(nonActiveMasterNodes.get(1), dataNodes.get(1)) + ), + NetworkDisruption.UNRESPONSIVE + ); + + internalCluster().clearDisruptionScheme(); + setDisruptionScheme(networkDisconnect); + networkDisconnect.startDisrupting(); + internalCluster().stopNode(firstMasterNode); + for (String nonActiveMasterNode : nonActiveMasterNodes) { + CoordinationDiagnosticsService diagnosticsOnMasterEligibleNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + nonActiveMasterNode + ); + assertBusy(() -> { + CoordinationDiagnosticsService.CoordinationDiagnosticsResult result = diagnosticsOnMasterEligibleNode + .diagnoseMasterStability(true); + assertThat(result.status(), equalTo(CoordinationDiagnosticsService.CoordinationDiagnosticsStatus.RED)); + assertThat( + result.summary(), + anyOf( + containsString("the master eligible nodes are unable to form a quorum"), + containsString("the cause has not been determined.") + ) + ); + }); + } + for (String dataNode : dataNodes) { + CoordinationDiagnosticsService diagnosticsOnDataNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + dataNode + ); + assertBusy(() -> { + CoordinationDiagnosticsService.CoordinationDiagnosticsResult result = 
diagnosticsOnDataNode.diagnoseMasterStability(true); + assertThat(result.status(), equalTo(CoordinationDiagnosticsService.CoordinationDiagnosticsStatus.RED)); + assertThat( + result.summary(), + anyOf( + containsString("the master eligible nodes are unable to form a quorum"), + containsString("the cause has not been determined.") + ) + ); + }); + } + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index d83712dde30da..b616da1d0735e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -64,7 +64,6 @@ import java.util.concurrent.TimeUnit; import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -564,48 +563,4 @@ public void testCannotJoinLeader() throws Exception { containsString("has been elected master, but the node being queried") ); } - - public void testNoQuorum() throws Exception { - /* - * In this test we have three master-eligible nodes. We make it so that the two non-active ones cannot communicate, and then we - * stop the active master node. Now there is no quorum so a new master cannot be elected. We set the master lookup threshold very - * low on the data nodes, so when we run the master stability check on each of the master nodes, it will see that there has been no - * master recently and because there is no quorum, so it returns a RED status. - */ - var settings = Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) - .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO) - .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS)) - .build(); - var masterNodes = internalCluster().startMasterOnlyNodes(3, settings); - var dataNodes = internalCluster().startDataOnlyNodes(2, settings); - ensureStableCluster(5); - String firstMasterNode = internalCluster().getMasterName(); - List nonActiveMasterNodes = masterNodes.stream().filter(nodeName -> firstMasterNode.equals(nodeName) == false).toList(); - NetworkDisruption networkDisconnect = new NetworkDisruption( - new NetworkDisruption.TwoPartitions( - Set.of(nonActiveMasterNodes.get(0), dataNodes.get(0)), - Set.of(nonActiveMasterNodes.get(1), dataNodes.get(1)) - ), - NetworkDisruption.UNRESPONSIVE - ); - - internalCluster().clearDisruptionScheme(); - setDisruptionScheme(networkDisconnect); - networkDisconnect.startDisrupting(); - internalCluster().stopNode(firstMasterNode); - for (String nonActiveMasterNode : nonActiveMasterNodes) { - assertMasterStability( - internalCluster().client(nonActiveMasterNode), - HealthStatus.RED, - anyOf( - containsString("unable to form a quorum"), - containsString("No master node observed in the last 1s, and the cause has not been determined.") - // later happens if master node has not replied within 1s - ) - ); - } - } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java 
b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index 82b85329d9dbd..24ca5e1abc523 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -370,11 +370,12 @@ private CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecently(Master DiscoveryNode currentMaster = coordinator.getPeerFinder().getLeader().get(); result = getResultOnCannotJoinLeader(localMasterHistory, currentMaster, explain); } else if (isLocalNodeMasterEligible == false) { // none is elected master and we aren't master eligible - // NOTE: The logic in this block will be implemented in a future PR - result = new CoordinationDiagnosticsResult( - CoordinationDiagnosticsStatus.RED, - "No master has been observed recently", - CoordinationDiagnosticsDetails.EMPTY + result = diagnoseOnHaveNotSeenMasterRecentlyAndWeAreNotMasterEligible( + localMasterHistory, + coordinator, + nodeHasMasterLookupTimeframe, + remoteCoordinationDiagnosisResult, + explain ); } else { // none is elected master and we are master eligible result = diagnoseOnHaveNotSeenMasterRecentlyAndWeAreMasterEligible( @@ -389,6 +390,91 @@ private CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecently(Master return result; } + /** + * This method handles the case when we have not had an elected master node recently, and we are on a node that is not + * master-eligible. In this case we reach out to some master-eligible node in order to see what it knows about master stability. + * @param localMasterHistory The master history, as seen from this node + * @param coordinator The Coordinator for this node + * @param nodeHasMasterLookupTimeframe The value of health.master_history.has_master_lookup_timeframe + * @param remoteCoordinationDiagnosisResult A reference to the result of polling a master-eligible node for diagnostic information + * @param explain If true, details are returned + * @return A CoordinationDiagnosticsResult that will be determined by the CoordinationDiagnosticsResult returned by the remote + * master-eligible node + */ + static CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecentlyAndWeAreNotMasterEligible( + MasterHistory localMasterHistory, + Coordinator coordinator, + TimeValue nodeHasMasterLookupTimeframe, + AtomicReference remoteCoordinationDiagnosisResult, + boolean explain + ) { + RemoteMasterHealthResult remoteResultOrException = remoteCoordinationDiagnosisResult == null + ? null + : remoteCoordinationDiagnosisResult.get(); + final CoordinationDiagnosticsStatus status; + final String summary; + final CoordinationDiagnosticsDetails details; + if (remoteResultOrException == null) { + status = CoordinationDiagnosticsStatus.RED; + summary = String.format( + Locale.ROOT, + "No master node observed in the last %s, and this node is not master eligible. 
Reaching out to a master-eligible node" + + " for more information", + nodeHasMasterLookupTimeframe + ); + if (explain) { + details = getDetails( + true, + localMasterHistory, + null, + Map.of(coordinator.getLocalNode().getId(), coordinator.getClusterFormationState().getDescription()) + ); + } else { + details = CoordinationDiagnosticsDetails.EMPTY; + } + } else { + DiscoveryNode remoteNode = remoteResultOrException.node; + CoordinationDiagnosticsResult remoteResult = remoteResultOrException.result; + Exception exception = remoteResultOrException.remoteException; + if (remoteResult != null) { + if (remoteResult.status().equals(CoordinationDiagnosticsStatus.GREEN) == false) { + status = remoteResult.status(); + summary = remoteResult.summary(); + } else { + status = CoordinationDiagnosticsStatus.RED; + summary = String.format( + Locale.ROOT, + "No master node observed in the last %s from this node, but %s reports that the status is GREEN. This " + + "indicates that there is a discovery problem on %s", + nodeHasMasterLookupTimeframe, + remoteNode.getName(), + coordinator.getLocalNode().getName() + ); + } + if (explain) { + details = remoteResult.details(); + } else { + details = CoordinationDiagnosticsDetails.EMPTY; + } + } else { + status = CoordinationDiagnosticsStatus.RED; + summary = String.format( + Locale.ROOT, + "No master node observed in the last %s from this node, and received an exception while reaching out to %s for " + + "diagnosis", + nodeHasMasterLookupTimeframe, + remoteNode.getName() + ); + if (explain) { + details = getDetails(true, localMasterHistory, exception, null); + } else { + details = CoordinationDiagnosticsDetails.EMPTY; + } + } + } + return new CoordinationDiagnosticsResult(status, summary, details); + } + /** * This method handles the case when we have not had an elected master node recently, and we are on a master-eligible node. 
In this * case we look at the cluster formation information from all master-eligible nodes, trying to understand if we have a discovery @@ -1210,5 +1296,14 @@ public void writeTo(StreamOutput out) throws IOException { } // Non-private for testing: - record RemoteMasterHealthResult(DiscoveryNode node, CoordinationDiagnosticsResult result, Exception remoteException) {} + record RemoteMasterHealthResult(DiscoveryNode node, CoordinationDiagnosticsResult result, Exception remoteException) { + public RemoteMasterHealthResult { + if (node == null) { + throw new IllegalArgumentException("Node cannot be null"); + } + if (result == null && remoteException == null) { + throw new IllegalArgumentException("Must provide a non-null value for one of result or remoteException"); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java index 4205fd7b97099..4fe654dd82025 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java @@ -48,6 +48,7 @@ import static org.elasticsearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.EXTREME_DELAY_VARIABILITY; import static org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService.ClusterFormationStateOrException; +import static org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService.CoordinationDiagnosticsStatus; import static org.elasticsearch.monitor.StatusInfo.Status.HEALTHY; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; @@ -496,6 +497,84 @@ public void testRedForDiscoveryProblems() { } } + public void testRedForNoMasterQueryingNonMaster() { + /* + * This test simulates a cluster with 3 master-eligible nodes and two data nodes. It disconnects all master-eligible nodes + * except one random one, and then asserts that we get the expected response from calling diagnoseMasterStability() on each of + * the data nodes. It then sets various values for + * remoteCoordinationDiagnosisResult on each of the non-master-eligible nodes (simulating different + * responses from a master-eligible node that it has polled), and then asserts that the correct result comes back from + * diagnoseMasterStability(). 
+ */ + try (Cluster cluster = new Cluster(3, true, Settings.EMPTY)) { + createAndAddNonMasterNode(cluster); + createAndAddNonMasterNode(cluster); + cluster.runRandomly(false, true, EXTREME_DELAY_VARIABILITY); + cluster.stabilise(); + DiscoveryNode nonKilledMasterNode = cluster.getAnyLeader().getLocalNode(); + for (Cluster.ClusterNode node : cluster.clusterNodes) { + if (node.getLocalNode().isMasterNode() && node.getLocalNode().equals(nonKilledMasterNode) == false) { + node.disconnect(); + } + } + cluster.runFor(DEFAULT_STABILISATION_TIME, "Cannot call stabilise() because there is no master"); + for (Cluster.ClusterNode node : cluster.clusterNodes.stream() + .filter(node -> node.getLocalNode().isMasterNode() == false) + .toList()) { + CoordinationDiagnosticsService.CoordinationDiagnosticsResult healthIndicatorResult = node.coordinationDiagnosticsService + .diagnoseMasterStability(true); + assertThat(healthIndicatorResult.status(), equalTo(CoordinationDiagnosticsStatus.RED)); + String summary = healthIndicatorResult.summary(); + assertThat( + summary, + containsString("No master node observed in the last 30s, and the master eligible nodes are unable to form a quorum") + ); + CoordinationDiagnosticsStatus artificialRemoteStatus = randomValueOtherThan( + CoordinationDiagnosticsStatus.GREEN, + () -> randomFrom(CoordinationDiagnosticsStatus.values()) + ); + String artificialRemoteStatusSummary = "Artificial failure"; + CoordinationDiagnosticsService.CoordinationDiagnosticsResult artificialRemoteResult = + new CoordinationDiagnosticsService.CoordinationDiagnosticsResult( + artificialRemoteStatus, + artificialRemoteStatusSummary, + null + ); + node.coordinationDiagnosticsService.remoteCoordinationDiagnosisResult = new AtomicReference<>( + new CoordinationDiagnosticsService.RemoteMasterHealthResult(nonKilledMasterNode, artificialRemoteResult, null) + ); + healthIndicatorResult = node.coordinationDiagnosticsService.diagnoseMasterStability(true); + assertThat(healthIndicatorResult.status(), equalTo(artificialRemoteStatus)); + assertThat(healthIndicatorResult.summary(), containsString(artificialRemoteStatusSummary)); + + artificialRemoteResult = new CoordinationDiagnosticsService.CoordinationDiagnosticsResult( + CoordinationDiagnosticsStatus.GREEN, + artificialRemoteStatusSummary, + null + ); + node.coordinationDiagnosticsService.remoteCoordinationDiagnosisResult = new AtomicReference<>( + new CoordinationDiagnosticsService.RemoteMasterHealthResult(nonKilledMasterNode, artificialRemoteResult, null) + ); + healthIndicatorResult = node.coordinationDiagnosticsService.diagnoseMasterStability(true); + assertThat(healthIndicatorResult.status(), equalTo(CoordinationDiagnosticsService.CoordinationDiagnosticsStatus.RED)); + assertThat(healthIndicatorResult.summary(), containsString("reports that the status is GREEN")); + + Exception artificialRemoteResultException = new RuntimeException(artificialRemoteStatusSummary); + node.coordinationDiagnosticsService.remoteCoordinationDiagnosisResult = new AtomicReference<>( + new CoordinationDiagnosticsService.RemoteMasterHealthResult(nonKilledMasterNode, null, artificialRemoteResultException) + ); + healthIndicatorResult = node.coordinationDiagnosticsService.diagnoseMasterStability(true); + assertThat(healthIndicatorResult.status(), equalTo(CoordinationDiagnosticsStatus.RED)); + assertThat(healthIndicatorResult.summary(), containsString("received an exception")); + } + + while (cluster.clusterNodes.stream().anyMatch(Cluster.ClusterNode::deliverBlackholedRequests)) { + 
logger.debug("--> stabilising again after delivering blackholed requests"); + cluster.runFor(DEFAULT_STABILISATION_TIME, "Cannot call stabilise() because there is no master"); + } + } + } + public void testYellowWithTooManyMasterChanges() { testChangeMasterThreeTimes(2, 100, "The elected master node has changed"); } @@ -1064,6 +1143,18 @@ public void testBeginPollingRemoteMasterStabilityDiagnosticCancel() { } } + public void testRemoteMasterHealthResult() { + expectThrows(IllegalArgumentException.class, () -> new CoordinationDiagnosticsService.RemoteMasterHealthResult(null, null, null)); + expectThrows( + IllegalArgumentException.class, + () -> new CoordinationDiagnosticsService.RemoteMasterHealthResult(null, null, new RuntimeException()) + ); + expectThrows( + IllegalArgumentException.class, + () -> new CoordinationDiagnosticsService.RemoteMasterHealthResult(mock(DiscoveryNode.class), null, null) + ); + } + public void testResultSerialization() { CoordinationDiagnosticsService.CoordinationDiagnosticsStatus status = getRandomStatus(); CoordinationDiagnosticsService.CoordinationDiagnosticsDetails details = getRandomDetails(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index 971cf9c57d484..49664761f7897 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -1294,6 +1294,7 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { coordinator.start(); gatewayService.start(); clusterService.start(); + coordinationDiagnosticsService.start(); coordinator.startInitialJoin(); } From ca11e82331d5d097ba20b3883fcf4542ac9176f7 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 12 Aug 2022 19:01:39 +0300 Subject: [PATCH 190/265] [ML] Improve reason when autoscaling capacity cannot be computed (#89316) When we cannot compute autoscaling capacity we call `MlAutoscalingDeciderService.buildDecisionAndRequestRefresh`. There are 4 callers. 3 of them set their own specific reason before the call. However, the method overwrites the reason with the generic `MEMORY_STALE` msg. This commit addresses this issue which should bubble up more detailed reasons for those edge cases. --- .../xpack/ml/autoscaling/MlAutoscalingDeciderService.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index 8c3b3df423c85..39913e3e5d879 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -342,7 +342,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider "view of job memory is stale given duration [{}]. 
Not attempting to make scaling decision", mlMemoryTracker.getStalenessDuration() ); - return buildDecisionAndRequestRefresh(reasonBuilder); + return buildDecisionAndRequestRefresh(reasonBuilder.setSimpleReason(MEMORY_STALE)); } // We need the current node loads to determine if we need to scale up or down List nodeLoads = new ArrayList<>(mlNodes.size()); @@ -1037,7 +1037,7 @@ Optional calculateFutureAvailableCapacity(PersistentTasksC private AutoscalingDeciderResult buildDecisionAndRequestRefresh(MlScalingReason.Builder reasonBuilder) { mlMemoryTracker.asyncRefresh(); - return new AutoscalingDeciderResult(null, reasonBuilder.setSimpleReason(MEMORY_STALE).build()); + return new AutoscalingDeciderResult(null, reasonBuilder.build()); } private Long getAnalyticsMemoryRequirement(String analyticsId) { From dcc87ddb415628d36bc0a05fe59af30b2dfbc4af Mon Sep 17 00:00:00 2001 From: David Turner Date: Sat, 13 Aug 2022 08:32:58 +0100 Subject: [PATCH 191/265] AwaitsFix for #89325 --- .../main/java/org/elasticsearch/transport/TransportService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index e935bb1e1578e..71d225b8c87d5 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -796,7 +796,7 @@ private static void handleSendRequestException( // should not happen innerException.addSuppressed(transportException); logger.error("unexpected exception from handler.handleException", innerException); - assert false : innerException; + // assert false : innerException; TODO AwaitsFix https://github.com/elastic/elasticsearch/issues/89325 } } From 4779893b25f87645fe7dcd695d618632977a8229 Mon Sep 17 00:00:00 2001 From: David Turner Date: Sat, 13 Aug 2022 10:33:15 +0100 Subject: [PATCH 192/265] Introduce BatchExecutionContext (#89323) Replaces the two arguments to `ClusterStateTaskExecutor#execute` with a parameter object called `BatchExecutionContext` so that #85525 can add a new and rarely-used parameter without generating tons of noise. 
--- .../UpdateTimeSeriesRangeService.java | 7 +- .../TransportDeleteDesiredNodesAction.java | 8 +- .../TransportUpdateDesiredNodesAction.java | 14 +-- .../indices/create/AutoCreateAction.java | 7 +- .../rollover/TransportRolloverAction.java | 10 +-- .../cluster/ClusterStateTaskExecutor.java | 16 ++-- .../cluster/ClusterStateTaskListener.java | 5 +- .../cluster/LocalMasterServiceTask.java | 10 +-- .../action/shard/ShardStateAction.java | 34 ++++--- .../coordination/JoinTaskExecutor.java | 41 ++++----- .../NodeRemovalClusterStateTaskExecutor.java | 19 ++-- .../metadata/MetadataIndexStateService.java | 36 ++++---- .../MetadataIndexTemplateService.java | 6 +- .../metadata/MetadataMappingService.java | 6 +- .../MetadataUpdateSettingsService.java | 8 +- .../cluster/service/MasterService.java | 16 ++-- .../metadata/HealthMetadataService.java | 7 +- .../elasticsearch/ingest/IngestService.java | 12 +-- .../ReservedStateErrorTaskExecutor.java | 11 ++- .../ReservedStateUpdateTaskExecutor.java | 11 ++- .../snapshots/SnapshotsService.java | 44 +++++----- .../ClusterStateTaskExecutorTests.java | 4 +- ...etadataIndexStateServiceBatchingTests.java | 6 +- .../cluster/service/MasterServiceTests.java | 88 ++++++++++--------- .../ReservedClusterStateServiceTests.java | 4 +- .../ClusterStateTaskExecutorUtils.java | 2 +- .../license/StartBasicClusterTask.java | 18 ++-- .../license/StartTrialClusterTask.java | 18 ++-- .../license/LicenseServiceTests.java | 3 +- .../xpack/ilm/IndexLifecycleRunner.java | 11 +-- .../ReservedLifecycleStateServiceTests.java | 2 +- .../rollup/v2/TransportRollupAction.java | 8 +- .../TransportDeleteShutdownNodeAction.java | 13 ++- .../TransportPutShutdownNodeAction.java | 16 ++-- ...ransportDeleteShutdownNodeActionTests.java | 4 +- .../TransportPutShutdownNodeActionTests.java | 7 +- 36 files changed, 268 insertions(+), 264 deletions(-) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java index fdfa7ea1d5f2c..00096643db1a0 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java @@ -31,7 +31,6 @@ import java.io.IOException; import java.time.Instant; import java.time.temporal.ChronoUnit; -import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; @@ -200,9 +199,9 @@ public void onFailure(Exception e) { private class UpdateTimeSeriesExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - var result = updateTimeSeriesTemporalRange(currentState, Instant.now()); - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + var result = updateTimeSeriesTemporalRange(batchExecutionContext.initialState(), Instant.now()); + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> taskContext.getTask().listener().accept(null)); } return result; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java index d4abe6f25cb9e..ac8d463c36046 100644 
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java @@ -27,8 +27,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.List; - public class TransportDeleteDesiredNodesAction extends TransportMasterNodeAction { private final ClusterStateTaskExecutor taskExecutor = new DeleteDesiredNodesExecutor(); @@ -83,11 +81,11 @@ public void onFailure(Exception e) { private static class DeleteDesiredNodesExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> taskContext.getTask().listener().onResponse(ActionResponse.Empty.INSTANCE)); } - return currentState.copyAndUpdateMetadata(metadata -> metadata.removeCustom(DesiredNodesMetadata.TYPE)); + return batchExecutionContext.initialState().copyAndUpdateMetadata(metadata -> metadata.removeCustom(DesiredNodesMetadata.TYPE)); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index 3ef34dadb6d11..ff84146e6a74e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.List; import java.util.Locale; import static java.lang.String.format; @@ -177,10 +176,11 @@ private static class UpdateDesiredNodesExecutor implements ClusterStateTaskExecu } @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - final var initialDesiredNodes = DesiredNodesMetadata.fromClusterState(currentState).getLatestDesiredNodes(); + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final var initialState = batchExecutionContext.initialState(); + final var initialDesiredNodes = DesiredNodesMetadata.fromClusterState(initialState).getLatestDesiredNodes(); var desiredNodes = initialDesiredNodes; - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { final UpdateDesiredNodesRequest request = taskContext.getTask().request(); if (request.isDryRun()) { try { @@ -205,12 +205,12 @@ public ClusterState execute(ClusterState currentState, List { - ClusterState state = currentState; + this.executor = batchExecutionContext -> { + final var taskContexts = batchExecutionContext.taskContexts(); final Map successfulRequests = Maps.newMapWithExpectedSize(taskContexts.size()); + ClusterState state = batchExecutionContext.initialState(); for (final var taskContext : taskContexts) { final var task = taskContext.getTask(); try { @@ -119,7 +120,7 @@ public TransportAction( taskContext.onFailure(e); } } - if (state != currentState) { + if (state != 
batchExecutionContext.initialState()) { state = allocationService.reroute(state, "auto-create"); } return state; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index d0248a09f505e..709aa841ad3d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -259,10 +259,10 @@ record RolloverExecutor( ActiveShardsObserver activeShardsObserver ) implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - final var results = new ArrayList(taskContexts.size()); - var state = currentState; - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final var results = new ArrayList(batchExecutionContext.taskContexts().size()); + var state = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { try { state = executeTask(state, results, taskContext); } catch (Exception e) { @@ -270,7 +270,7 @@ public ClusterState execute(ClusterState currentState, List) () -> results.stream().map(t -> t.sourceIndexName() + "->" + t.rolloverIndexName()).iterator(), diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index e9b1e6c45951d..32419699fb7eb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -29,11 +29,9 @@ public interface ClusterStateTaskExecutor { * surprisingly many tasks to process in the batch. If it's possible to accumulate the effects of the tasks at a lower level then you * should do that instead. * - * @param currentState The initial cluster state on which the tasks should be executed. - * @param taskContexts A {@link TaskContext} for each task in the batch. Implementations must complete every context in the list. - * @return The resulting cluster state after executing all the tasks. If {code currentState} is returned then no update is published. + * @return The resulting cluster state after executing all the tasks. If {code initialState} is returned then no update is published. */ - ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception; + ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception; /** * @return {@code true} iff this executor should only run on the elected master. @@ -54,7 +52,7 @@ default void clusterStatePublished(ClusterState newClusterState) {} /** * Builds a concise description of a list of tasks (to be used in logging etc.). * - * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}. + * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(BatchExecutionContext)}. * but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution. * * @param tasks the tasks to describe. 
@@ -203,4 +201,12 @@ default void success(ClusterStateAckListener clusterStateAckListener) { */ void onFailure(Exception failure); } + + /** + * Encapsulates the context in which a batch of tasks executes. + * + * @param initialState The initial cluster state on which the tasks should be executed. + * @param taskContexts A {@link TaskContext} for each task in the batch. Implementations must complete every context in the list. + */ + record BatchExecutionContext (ClusterState initialState, List> taskContexts) {} } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java index f58614c00fd40..1e383a6da7df6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java @@ -11,8 +11,6 @@ import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.elasticsearch.cluster.service.MasterService; -import java.util.List; - public interface ClusterStateTaskListener { /** @@ -32,8 +30,7 @@ public interface ClusterStateTaskListener { void onFailure(Exception e); /** - * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} method have been processed properly by all - * listeners. + * Called when the result of the {@link ClusterStateTaskExecutor#execute} method has been processed properly by all listeners. * * The {@param newState} parameter is the state that was ultimately published. This can lead to surprising behaviour if tasks are * batched together: a later task in the batch may undo or overwrite the changes made by an earlier task. In general you should prefer diff --git a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java index 33b0bedb1b450..78d546781d6c8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java @@ -52,14 +52,14 @@ public String describeTasks(List tasks) { } @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) - throws Exception { - final LocalMasterServiceTask thisTask = LocalMasterServiceTask.this; + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final var thisTask = LocalMasterServiceTask.this; + final var taskContexts = batchExecutionContext.taskContexts(); assert taskContexts.size() == 1 && taskContexts.get(0).getTask() == thisTask : "expected one-element task list containing current object but was " + taskContexts; - thisTask.execute(currentState); + thisTask.execute(batchExecutionContext.initialState()); taskContexts.get(0).success(() -> onPublicationComplete()); - return currentState; + return batchExecutionContext.initialState(); } } ); diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index c2236b50fa1cd..1d0fc67737e16 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -316,18 +316,15 @@ public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, } @Override - public ClusterState execute( - ClusterState currentState, - 
List> taskContexts - ) throws Exception { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { List> tasksToBeApplied = new ArrayList<>(); List failedShardsToBeApplied = new ArrayList<>(); List staleShardsToBeApplied = new ArrayList<>(); - - for (final var taskContext : taskContexts) { + final ClusterState initialState = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); FailedShardEntry entry = task.entry(); - IndexMetadata indexMetadata = currentState.metadata().index(entry.getShardId().getIndex()); + IndexMetadata indexMetadata = initialState.metadata().index(entry.getShardId().getIndex()); if (indexMetadata == null) { // tasks that correspond to non-existent indices are marked as successful logger.debug( @@ -377,7 +374,7 @@ public ClusterState execute( } } - ShardRouting matched = currentState.getRoutingTable().getByAllocationId(entry.getShardId(), entry.getAllocationId()); + ShardRouting matched = initialState.getRoutingTable().getByAllocationId(entry.getShardId(), entry.getAllocationId()); if (matched == null) { Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(entry.getShardId().id()); // mark shard copies without routing entries that are in in-sync allocations set only as stale if the reason why @@ -407,9 +404,9 @@ public ClusterState execute( } assert tasksToBeApplied.size() == failedShardsToBeApplied.size() + staleShardsToBeApplied.size(); - ClusterState maybeUpdatedState = currentState; + ClusterState maybeUpdatedState = initialState; try { - maybeUpdatedState = applyFailedShards(currentState, failedShardsToBeApplied, staleShardsToBeApplied); + maybeUpdatedState = applyFailedShards(initialState, failedShardsToBeApplied, staleShardsToBeApplied); for (final var taskContext : tasksToBeApplied) { taskContext.success(() -> taskContext.getTask().listener().onResponse(TransportResponse.Empty.INSTANCE)); } @@ -625,15 +622,16 @@ public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, } @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { List> tasksToBeApplied = new ArrayList<>(); - List shardRoutingsToBeApplied = new ArrayList<>(taskContexts.size()); + List shardRoutingsToBeApplied = new ArrayList<>(batchExecutionContext.taskContexts().size()); Set seenShardRoutings = new HashSet<>(); // to prevent duplicates final Map updatedTimestampRanges = new HashMap<>(); - for (var taskContext : taskContexts) { + final ClusterState initialState = batchExecutionContext.initialState(); + for (var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); StartedShardEntry entry = task.getEntry(); - final ShardRouting matched = currentState.getRoutingTable().getByAllocationId(entry.shardId, entry.allocationId); + final ShardRouting matched = initialState.getRoutingTable().getByAllocationId(entry.shardId, entry.allocationId); if (matched == null) { // tasks that correspond to non-existent shards are marked as successful. The reason is that we resend shard started // events on every cluster state publishing that does not contain the shard as started yet. 
This means that old stale @@ -643,7 +641,7 @@ public ClusterState execute(ClusterState currentState, List task.listener().onResponse(TransportResponse.Empty.INSTANCE)); } else { if (matched.primary() && entry.primaryTerm > 0) { - final IndexMetadata indexMetadata = currentState.metadata().index(entry.shardId.getIndex()); + final IndexMetadata indexMetadata = initialState.metadata().index(entry.shardId.getIndex()); assert indexMetadata != null; final long currentPrimaryTerm = indexMetadata.primaryTerm(entry.shardId.id()); if (currentPrimaryTerm != entry.primaryTerm) { @@ -694,7 +692,7 @@ public ClusterState execute(ClusterState currentState, List= shardRoutingsToBeApplied.size(); - ClusterState maybeUpdatedState = currentState; + ClusterState maybeUpdatedState = initialState; try { - maybeUpdatedState = allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied); + maybeUpdatedState = allocationService.applyStartedShards(initialState, shardRoutingsToBeApplied); if (updatedTimestampRanges.isEmpty() == false) { final Metadata.Builder metadataBuilder = Metadata.builder(maybeUpdatedState.metadata()); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index 43faf92587103..d5756a5b34669 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -49,45 +49,46 @@ public JoinTaskExecutor(AllocationService allocationService, RerouteService rero } @Override - public ClusterState execute(ClusterState currentState, List> joinTaskContexts) throws Exception { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { // The current state that MasterService uses might have been updated by a (different) master in a higher term already. If so, stop // processing the current cluster state update, there's no point in continuing to compute it as it will later be rejected by // Coordinator#publish anyhow. 
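The join-executor hunk that follows keeps only the tasks carrying the highest term in the batch and fails the remaining contexts before touching the cluster state. A standalone sketch of that partitioning idiom is shown here; the JoinTask and JoinTaskContext types and the IllegalStateException are illustrative stand-ins, not the real JoinTaskExecutor or NotMasterException.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

record JoinTask(long term) {}

interface JoinTaskContext {
    JoinTask getTask();
    void onFailure(Exception e);
}

class TermPartitioning {
    // Keep only the contexts whose task carries the newest term; fail the outdated ones.
    static List<JoinTaskContext> retainLatestTerm(List<JoinTaskContext> contexts) {
        long term = contexts.stream().mapToLong(c -> c.getTask().term()).max().orElseThrow();
        Map<Boolean, List<JoinTaskContext>> split = contexts.stream()
            .collect(Collectors.partitioningBy(c -> c.getTask().term() == term));
        for (JoinTaskContext outdated : split.get(false)) {
            outdated.onFailure(new IllegalStateException("higher term " + term + " encountered"));
        }
        return split.get(true); // only these are applied to the cluster state
    }
}
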
- assert joinTaskContexts.isEmpty() == false : "Expected to have non empty join tasks list"; + assert batchExecutionContext.taskContexts().isEmpty() == false : "Expected to have non empty join tasks list"; - var term = joinTaskContexts.stream().mapToLong(t -> t.getTask().term()).max().getAsLong(); + var term = batchExecutionContext.taskContexts().stream().mapToLong(t -> t.getTask().term()).max().getAsLong(); - var split = joinTaskContexts.stream().collect(Collectors.partitioningBy(t -> t.getTask().term() == term)); + var split = batchExecutionContext.taskContexts().stream().collect(Collectors.partitioningBy(t -> t.getTask().term() == term)); for (TaskContext outdated : split.get(false)) { outdated.onFailure( new NotMasterException("Higher term encountered (encountered: " + term + " > used: " + outdated.getTask().term() + ")") ); } - joinTaskContexts = split.get(true); + final var joinTaskContexts = split.get(true); + final var initialState = batchExecutionContext.initialState(); - if (currentState.term() > term) { - logger.trace("encountered higher term {} than current {}, there is a newer master", currentState.term(), term); + if (initialState.term() > term) { + logger.trace("encountered higher term {} than current {}, there is a newer master", initialState.term(), term); throw new NotMasterException( - "Higher term encountered (current: " + currentState.term() + " > used: " + term + "), there is a newer master" + "Higher term encountered (current: " + initialState.term() + " > used: " + term + "), there is a newer master" ); } final boolean isBecomingMaster = joinTaskContexts.stream().anyMatch(t -> t.getTask().isBecomingMaster()); - final DiscoveryNodes currentNodes = currentState.nodes(); + final DiscoveryNodes currentNodes = initialState.nodes(); boolean nodesChanged = false; ClusterState.Builder newState; if (currentNodes.getMasterNode() == null && isBecomingMaster) { - assert currentState.term() < term : "there should be at most one become master task per election (= by term)"; + assert initialState.term() < term : "there should be at most one become master task per election (= by term)"; // use these joins to try and become the master. // Note that we don't have to do any validation of the amount of joining nodes - the commit // during the cluster state publishing guarantees that we have enough - newState = becomeMasterAndTrimConflictingNodes(currentState, joinTaskContexts, term); + newState = becomeMasterAndTrimConflictingNodes(initialState, joinTaskContexts, term); nodesChanged = true; } else if (currentNodes.isLocalNodeElectedMaster()) { - assert currentState.term() == term : "term should be stable for the same master"; - newState = ClusterState.builder(currentState); + assert initialState.term() == term : "term should be stable for the same master"; + newState = ClusterState.builder(initialState); } else { logger.trace("processing node joins, but we are not the master. 
current master: {}", currentNodes.getMasterNode()); throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request"); @@ -100,7 +101,7 @@ public ClusterState execute(ClusterState currentState, List joinedNodeIdsByNodeName = new HashMap<>(); for (final var joinTaskContext : joinTaskContexts) { @@ -118,7 +119,7 @@ public ClusterState execute(ClusterState currentState, List { // Update nodeId in VotingConfigExclusion when a new node with excluded node name joins if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId()) @@ -164,11 +165,11 @@ public ClusterState execute(ClusterState currentState, List { private static final Logger logger = LogManager.getLogger(NodeRemovalClusterStateTaskExecutor.class); @@ -53,33 +51,34 @@ public NodeRemovalClusterStateTaskExecutor(AllocationService allocationService) } @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(currentState.nodes()); + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final ClusterState initialState = batchExecutionContext.initialState(); + final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(initialState.nodes()); boolean removed = false; - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); - if (currentState.nodes().nodeExists(task.node())) { + if (initialState.nodes().nodeExists(task.node())) { remainingNodesBuilder.remove(task.node()); removed = true; } else { logger.debug("node [{}] does not exist in cluster state, ignoring", task); } - taskContext.success(() -> task.onClusterStateProcessed.run()); + taskContext.success(task.onClusterStateProcessed::run); } final ClusterState finalState; if (removed) { - final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder); + final ClusterState remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder); final ClusterState ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); finalState = allocationService.disassociateDeadNodes( ptasksDisassociatedState, true, - describeTasks(taskContexts.stream().map(TaskContext::getTask).toList()) + describeTasks(batchExecutionContext.taskContexts().stream().map(TaskContext::getTask).toList()) ); } else { // no nodes to remove, keep the current cluster state - finalState = currentState; + finalState = initialState; } return finalState; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index 9d8777da6c733..36a8d86f1e6c3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -172,9 +172,9 @@ public void closeIndices(final CloseIndexClusterStateUpdateRequest request, fina private class AddBlocksToCloseExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - ClusterState state = currentState; - for (final var taskContext : taskContexts) { + public ClusterState 
execute(BatchExecutionContext batchExecutionContext) throws Exception { + ClusterState state = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); try { final Map blockedIndices = new HashMap<>(task.request.indices().length); @@ -227,9 +227,9 @@ private class CloseIndicesExecutor implements ClusterStateTaskExecutor> taskContexts) throws Exception { - ClusterState state = currentState; - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + ClusterState state = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); try { final Tuple> closingResult = closeRoutingTable( @@ -489,10 +489,10 @@ public void addIndexBlock(AddIndexBlockClusterStateUpdateRequest request, Action private class AddBlocksExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - ClusterState state = currentState; + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + ClusterState state = batchExecutionContext.initialState(); - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); final Tuple> blockResult = addIndexBlock( @@ -554,10 +554,10 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) private static class FinalizeBlocksExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - ClusterState state = currentState; + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + ClusterState state = batchExecutionContext.initialState(); - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); final Tuple> finalizeResult = finalizeBlock( @@ -1100,13 +1100,13 @@ public static ClusterBlock createUUIDBasedBlock(ClusterBlock clusterBlock) { private class OpenIndicesExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) { - ClusterState state = currentState; + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + ClusterState state = batchExecutionContext.initialState(); try { // build an in-order de-duplicated array of all the indices to open - final Set indicesToOpen = Sets.newLinkedHashSetWithExpectedSize(taskContexts.size()); - for (final var taskContext : taskContexts) { + final Set indicesToOpen = Sets.newLinkedHashSetWithExpectedSize(batchExecutionContext.taskContexts().size()); + for (final var taskContext : batchExecutionContext.taskContexts()) { Collections.addAll(indicesToOpen, taskContext.getTask().request.indices()); } Index[] indices = indicesToOpen.toArray(Index.EMPTY_ARRAY); @@ -1117,12 +1117,12 @@ public ClusterState execute(ClusterState currentState, List TEMPLATE_TASK_EXECUTOR = (currentState, taskContexts) -> { - ClusterState state = currentState; - for (final var taskContext : taskContexts) { + private static final ClusterStateTaskExecutor TEMPLATE_TASK_EXECUTOR = batchExecutionContext -> { + ClusterState state = 
batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); state = task.execute(state); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java index 5b7c184a3af7b..b8304110031dd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java @@ -94,11 +94,11 @@ public TimeValue ackTimeout() { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) - throws Exception { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { Map indexMapperServices = new HashMap<>(); try { - for (final var taskContext : taskContexts) { + var currentState = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); final PutMappingClusterStateUpdateRequest request = task.request; try { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 5ac4190624b8a..8777467edd33b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -70,9 +70,9 @@ public MetadataUpdateSettingsService( this.indexScopedSettings = indexScopedSettings; this.indicesService = indicesService; this.shardLimitValidator = shardLimitValidator; - this.executor = (currentState, taskContexts) -> { - ClusterState state = currentState; - for (final var taskContext : taskContexts) { + this.executor = batchExecutionContext -> { + ClusterState state = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); state = task.execute(state); @@ -81,7 +81,7 @@ public MetadataUpdateSettingsService( taskContext.onFailure(e); } } - if (state != currentState) { + if (state != batchExecutionContext.initialState()) { // reroute in case things change that require it (like number of replicas) state = allocationService.reroute(state, "settings update"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 23dd0d14e1fea..29fe2a92602c6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -524,12 +524,16 @@ public void submitUnbatchedStateUpdateTask(String source, ClusterStateUpdateTask private static class UnbatchedExecutor implements ClusterStateTaskExecutor { @Override @SuppressForbidden(reason = "consuming published cluster state for legacy reasons") - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - assert taskContexts.size() == 1 : "this only supports a single task but received " + taskContexts; - final var taskContext = taskContexts.get(0); + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + assert 
batchExecutionContext.taskContexts().size() == 1 + : "this only supports a single task but received " + batchExecutionContext.taskContexts(); + final var taskContext = batchExecutionContext.taskContexts().get(0); final var task = taskContext.getTask(); - final var newState = task.execute(currentState); - final Consumer publishListener = publishedState -> task.clusterStateProcessed(currentState, publishedState); + final var newState = task.execute(batchExecutionContext.initialState()); + final Consumer publishListener = publishedState -> task.clusterStateProcessed( + batchExecutionContext.initialState(), + publishedState + ); if (task instanceof ClusterStateAckListener ackListener) { taskContext.success(publishListener, ackListener); } else { @@ -972,7 +976,7 @@ private static ClusterState innerExecuteTasks( ) { final var taskContexts = castTaskContexts(executionResults); try { - return executor.execute(previousClusterState, taskContexts); + return executor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(previousClusterState, taskContexts)); } catch (Exception e) { logger.trace( () -> format( diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java index 519dbf69be6ed..5ebf5715dccf4 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java @@ -170,10 +170,9 @@ public void onFailure(@Nullable Exception e) { static class Executor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) - throws Exception { - ClusterState updatedState = currentState; - for (TaskContext taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + ClusterState updatedState = batchExecutionContext.initialState(); + for (TaskContext taskContext : batchExecutionContext.taskContexts()) { updatedState = taskContext.getTask().execute(updatedState); taskContext.success(() -> {}); } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 152c9a80f7d62..121553a024435 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -108,11 +108,11 @@ public class IngestService implements ClusterStateApplier, ReportingService PIPELINE_TASK_EXECUTOR = (currentState, taskContexts) -> { - final var allIndexMetadata = currentState.metadata().indices().values(); - final IngestMetadata initialIngestMetadata = currentState.metadata().custom(IngestMetadata.TYPE); + static final ClusterStateTaskExecutor PIPELINE_TASK_EXECUTOR = batchExecutionContext -> { + final var allIndexMetadata = batchExecutionContext.initialState().metadata().indices().values(); + final IngestMetadata initialIngestMetadata = batchExecutionContext.initialState().metadata().custom(IngestMetadata.TYPE); var currentIngestMetadata = initialIngestMetadata; - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); currentIngestMetadata = task.execute(currentIngestMetadata, allIndexMetadata); @@ -123,8 +123,8 @@ public class IngestService implements ClusterStateApplier, ReportingService 
b.putCustom(IngestMetadata.TYPE, finalIngestMetadata)); + ? batchExecutionContext.initialState() + : batchExecutionContext.initialState().copyAndUpdateMetadata(b -> b.putCustom(IngestMetadata.TYPE, finalIngestMetadata)); }; /** diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java index ea37daf87ba66..55c1004f68206 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java @@ -14,8 +14,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import java.util.List; - /** * Reserved cluster error state task executor *

@@ -25,13 +23,14 @@ record ReservedStateErrorTaskExecutor() implements ClusterStateTaskExecutor> taskContexts) { - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + var updatedState = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); - currentState = task.execute(currentState); + updatedState = task.execute(updatedState); taskContext.success(() -> task.listener().onResponse(ActionResponse.Empty.INSTANCE)); } - return currentState; + return updatedState; } @Override diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java index 9a09f1b253bce..bf840df027837 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java @@ -17,8 +17,6 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.common.Priority; -import java.util.List; - /** * Reserved cluster state update task executor * @@ -29,12 +27,13 @@ public record ReservedStateUpdateTaskExecutor(RerouteService rerouteService) imp private static final Logger logger = LogManager.getLogger(ReservedStateUpdateTaskExecutor.class); @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - for (final var taskContext : taskContexts) { - currentState = taskContext.getTask().execute(currentState); + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + var updatedState = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { + updatedState = taskContext.getTask().execute(updatedState); taskContext.success(() -> taskContext.getTask().listener().onResponse(ActionResponse.Empty.INSTANCE)); } - return currentState; + return updatedState; } @Override diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 604227bd8d48e..f6a617c5c8011 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -2625,7 +2625,7 @@ public final void clusterStateProcessed(ClusterState oldState, ClusterState newS /** * Computes an updated {@link SnapshotsInProgress} that takes into account an updated version of * {@link SnapshotDeletionsInProgress} that has a {@link SnapshotDeletionsInProgress.Entry} removed from it - * relative to the {@link SnapshotDeletionsInProgress} found in {@code currentState}. + * relative to the {@link SnapshotDeletionsInProgress} found in {@code initialState}. * The removal of a delete from the cluster state can trigger two possible actions on in-progress snapshots: *

  • Snapshots that had unfinished shard snapshots in state {@link ShardSnapshotStatus#UNASSIGNED_QUEUED} that @@ -3037,9 +3037,8 @@ public boolean assertAllListenersResolved() { * * Package private to allow for tests. */ - static final ClusterStateTaskExecutor SHARD_STATE_EXECUTOR = ( - currentState, - taskContexts) -> new SnapshotShardsUpdateContext(currentState, taskContexts).computeUpdatedState(); + static final ClusterStateTaskExecutor SHARD_STATE_EXECUTOR = + batchExecutionContext -> new SnapshotShardsUpdateContext(batchExecutionContext).computeUpdatedState(); private static boolean isQueued(@Nullable ShardSnapshotStatus status) { return status != null && status.state() == ShardState.QUEUED; @@ -3057,11 +3056,11 @@ private static final class SnapshotShardsUpdateContext { // number of started tasks as a result of applying updates to the snapshot entries seen so far private int startedCount = 0; - // current cluster state - private final ClusterState currentState; + // batch execution context + private final ClusterStateTaskExecutor.BatchExecutionContext batchExecutionContext; - // task contexts to be completed on success - private final List> taskContexts; + // initial cluster state for update computation + private final ClusterState initialState; // updates outstanding to be applied to existing snapshot entries private final Map> updatesByRepo; @@ -3069,21 +3068,18 @@ private static final class SnapshotShardsUpdateContext { // updates that were used to update an existing in-progress shard snapshot private final Set executedUpdates = new HashSet<>(); - SnapshotShardsUpdateContext( - ClusterState currentState, - List> taskContexts - ) { - this.currentState = currentState; - this.taskContexts = taskContexts; - updatesByRepo = new HashMap<>(); - for (final var taskContext : taskContexts) { + SnapshotShardsUpdateContext(ClusterStateTaskExecutor.BatchExecutionContext batchExecutionContext) { + this.batchExecutionContext = batchExecutionContext; + this.initialState = batchExecutionContext.initialState(); + this.updatesByRepo = new HashMap<>(); + for (final var taskContext : batchExecutionContext.taskContexts()) { updatesByRepo.computeIfAbsent(taskContext.getTask().snapshot.getRepository(), r -> new ArrayList<>()) .add(taskContext.getTask()); } } ClusterState computeUpdatedState() { - final SnapshotsInProgress existing = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + final SnapshotsInProgress existing = initialState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); SnapshotsInProgress updated = existing; for (Map.Entry> updates : updatesByRepo.entrySet()) { final String repoName = updates.getKey(); @@ -3098,8 +3094,8 @@ ClusterState computeUpdatedState() { updated = updated.withUpdatedEntriesForRepo(repoName, newEntries); } - final var result = new ShardSnapshotUpdateResult(currentState.metadata(), updated); - for (final var taskContext : taskContexts) { + final var result = new ShardSnapshotUpdateResult(initialState.metadata(), updated); + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> taskContext.getTask().listener.onResponse(result)); } @@ -3109,10 +3105,10 @@ ClusterState computeUpdatedState() { changedCount, startedCount ); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updated).build(); + return ClusterState.builder(initialState).putCustom(SnapshotsInProgress.TYPE, updated).build(); } assert existing == updated; - return currentState; + return initialState; } private 
SnapshotsInProgress.Entry applyToEntry(SnapshotsInProgress.Entry entry, List updates) { @@ -3260,7 +3256,7 @@ private void tryStartNextTaskAfterCloneUpdated(RepositoryShardId repoShardId, Sh if (entry.isClone() == false) { tryStartSnapshotAfterCloneFinish(repoShardId, updatedState.generation()); } else if (isQueued(entry.shardsByRepoShardId().get(repoShardId))) { - final String localNodeId = currentState.nodes().getLocalNodeId(); + final String localNodeId = initialState.nodes().getLocalNodeId(); assert updatedState.nodeId().equals(localNodeId) : "Clone updated with node id [" + updatedState.nodeId() + "] but local node id is [" + localNodeId + "]"; startShardOperation(clonesBuilder(), localNodeId, updatedState.generation(), repoShardId); @@ -3278,7 +3274,7 @@ private void tryStartNextTaskAfterSnapshotUpdated(ShardId shardId, ShardSnapshot // shard snapshot was completed, we check if we can start a clone operation for the same repo shard startShardOperation( clonesBuilder(), - currentState.nodes().getLocalNodeId(), + initialState.nodes().getLocalNodeId(), updatedState.generation(), repoShardId ); @@ -3307,7 +3303,7 @@ private void startShardSnapshot(RepositoryShardId repoShardId, ShardGeneration g + "] because it's a normal snapshot but did not"; // work out the node to run the snapshot task on as it might have changed from the previous operation if it was a clone // or there was a primary failover - final IndexRoutingTable indexRouting = currentState.routingTable().index(index); + final IndexRoutingTable indexRouting = initialState.routingTable().index(index); final ShardRouting shardRouting; if (indexRouting == null) { shardRouting = null; diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTaskExecutorTests.java index a5c5da31b8bd7..bf09853f3133a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTaskExecutorTests.java @@ -34,9 +34,7 @@ public String toString() { } public void testDescribeTasks() { - final ClusterStateTaskExecutor executor = (currentState, taskContexts) -> { - throw new AssertionError("should not be called"); - }; + final ClusterStateTaskExecutor executor = batchExecutionContext -> { throw new AssertionError("should not be called"); }; assertThat("describes an empty list", executor.describeTasks(List.of()), equalTo("")); assertThat("describes a singleton list", executor.describeTasks(List.of(new TestTask("a task"))), equalTo("Task{a task}")); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java index 335cf6e4dd21b..e52a3726cc046 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java @@ -203,13 +203,13 @@ private static CheckedRunnable blockMasterService(MasterService maste "block", new ExpectSuccessTask(), ClusterStateTaskConfig.build(Priority.URGENT), - (currentState, taskContexts) -> { + batchExecutionContext -> { executionBarrier.await(10, TimeUnit.SECONDS); // notify test thread that the master service is blocked executionBarrier.await(10, TimeUnit.SECONDS); // wait for test thread to release us - for (final var taskContext : 
taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> {}); } - return currentState; + return batchExecutionContext.initialState(); } ); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 7cdc310b89a64..03ec1b692efbd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -222,11 +222,11 @@ public void waitForTaskCompletion(Task task) {} ClusterStateTaskConfig.build(Priority.NORMAL), new ClusterStateTaskExecutor<>() { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) { - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> {}); } - return ClusterState.builder(currentState).build(); + return ClusterState.builder(batchExecutionContext.initialState()).build(); } @Override @@ -341,11 +341,11 @@ public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws Interru ClusterStateTaskConfig.build(Priority.NORMAL), new ClusterStateTaskExecutor<>() { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) { - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> { throw new RuntimeException("testing exception handling"); }); } - return ClusterState.builder(currentState).build(); + return ClusterState.builder(batchExecutionContext.initialState()).build(); } @Override @@ -542,14 +542,14 @@ public void addExpectedTaskCount(int taskCount) { } @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) { assertTrue("Should execute all tasks at once", executed.compareAndSet(false, true)); - assertThat("Should execute all tasks at once", taskContexts.size(), equalTo(expectedTaskCount)); + assertThat("Should execute all tasks at once", batchExecutionContext.taskContexts().size(), equalTo(expectedTaskCount)); executionCountDown.countDown(); - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> {}); } - return currentState; + return batchExecutionContext.initialState(); } } @@ -566,13 +566,13 @@ public ClusterState execute(ClusterState currentState, List { + batchExecutionContext -> { executionBarrier.await(10, TimeUnit.SECONDS); // notify test thread that the master service is blocked executionBarrier.await(10, TimeUnit.SECONDS); // wait for test thread to release us - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> {}); } - return currentState; + return batchExecutionContext.initialState(); } ); @@ -707,19 +707,19 @@ class TaskExecutor implements ClusterStateTaskExecutor { private final List assignments = new ArrayList<>(); @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) { - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext 
batchExecutionContext) { + for (final var taskContext : batchExecutionContext.taskContexts()) { assertThat("All tasks should belong to this executor", assignments, hasItem(taskContext.getTask())); } - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.getTask().execute(); } - executed.addAndGet(taskContexts.size()); - ClusterState maybeUpdatedClusterState = currentState; + executed.addAndGet(batchExecutionContext.taskContexts().size()); + ClusterState maybeUpdatedClusterState = batchExecutionContext.initialState(); if (randomBoolean()) { - maybeUpdatedClusterState = ClusterState.builder(currentState).build(); + maybeUpdatedClusterState = ClusterState.builder(batchExecutionContext.initialState()).build(); batches.incrementAndGet(); assertThat( "All cluster state modifications should be executed on a single thread", @@ -728,7 +728,7 @@ public ClusterState execute(ClusterState currentState, List> t ); } - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> { processedStates.incrementAndGet(); processedStatesLatch.get().countDown(); @@ -845,14 +845,14 @@ public void onFailure(Exception e) { } } - final ClusterStateTaskExecutor executor = (currentState, taskContexts) -> { + final ClusterStateTaskExecutor executor = batchExecutionContext -> { if (randomBoolean()) { throw new RuntimeException("simulated"); } else { - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.onFailure(new RuntimeException("simulated")); } - return currentState; + return batchExecutionContext.initialState(); } }; @@ -924,11 +924,11 @@ public void onFailure(Exception e) { final var executor = new ClusterStateTaskExecutor() { @Override @SuppressForbidden(reason = "consuming published cluster state for legacy reasons") - public ClusterState execute(ClusterState currentState, List> taskContexts) { - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(taskContext.getTask().publishListener::onResponse); } - return ClusterState.builder(currentState).build(); + return ClusterState.builder(batchExecutionContext.initialState()).build(); } }; @@ -1043,8 +1043,8 @@ public void testBlockingCallInClusterStateTaskListenerFails() throws Interrupted "testBlockingCallInClusterStateTaskListenerFails", new ExpectSuccessTask(), ClusterStateTaskConfig.build(Priority.NORMAL), - (currentState, taskContexts) -> { - for (final var taskContext : taskContexts) { + batchExecutionContext -> { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> { BaseFuture future = new BaseFuture() { }; @@ -1062,7 +1062,7 @@ public void testBlockingCallInClusterStateTaskListenerFails() throws Interrupted } }); } - return ClusterState.builder(currentState).build(); + return ClusterState.builder(batchExecutionContext.initialState()).build(); } ); @@ -1399,11 +1399,13 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) "success-test", new Task(), ClusterStateTaskConfig.build(Priority.NORMAL), - (currentState, taskContexts) -> { - for (final var taskContext : taskContexts) { + batchExecutionContext -> { + for (final var taskContext : batchExecutionContext.taskContexts()) { 
taskContext.success(latch::countDown, taskContext.getTask()); } - return randomBoolean() ? currentState : ClusterState.builder(currentState).build(); + return randomBoolean() + ? batchExecutionContext.initialState() + : ClusterState.builder(batchExecutionContext.initialState()).build(); } ); @@ -1439,11 +1441,13 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) "success-test", new Task(), ClusterStateTaskConfig.build(Priority.NORMAL), - (currentState, taskContexts) -> { - for (final var taskContext : taskContexts) { + batchExecutionContext -> { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(latch::countDown, new LatchAckListener(latch)); } - return randomBoolean() ? currentState : ClusterState.builder(currentState).build(); + return randomBoolean() + ? batchExecutionContext.initialState() + : ClusterState.builder(batchExecutionContext.initialState()).build(); } ); @@ -1479,11 +1483,13 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) "success-test", new Task(), ClusterStateTaskConfig.build(Priority.NORMAL), - (currentState, taskContexts) -> { - for (final var taskContext : taskContexts) { + batchExecutionContext -> { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(new LatchAckListener(latch)); } - return randomBoolean() ? currentState : ClusterState.builder(currentState).build(); + return randomBoolean() + ? batchExecutionContext.initialState() + : ClusterState.builder(batchExecutionContext.initialState()).build(); } ); @@ -1755,11 +1761,11 @@ class Executor implements ClusterStateTaskExecutor { final Semaphore semaphore = new Semaphore(0); @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) { - for (final var taskContext : taskContexts) { + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> semaphore.release()); } - return currentState; + return batchExecutionContext.initialState(); } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index 27ae0157cd121..e361407671e4f 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -179,7 +179,7 @@ public void success(Consumer publishedStateConsumer, ClusterStateA public void onFailure(Exception failure) {} }; - ClusterState newState = taskExecutor.execute(state, List.of(taskContext)); + ClusterState newState = taskExecutor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(taskContext))); assertEquals(state, newState); assertTrue(successCalled.get()); verify(task, times(1)).execute(any()); @@ -235,7 +235,7 @@ public void onFailure(Exception failure) {} ReservedStateErrorTaskExecutor executor = new ReservedStateErrorTaskExecutor(); - ClusterState newState = executor.execute(state, List.of(taskContext)); + ClusterState newState = executor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(taskContext))); verify(task, times(1)).execute(any()); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java 
b/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java index 4736a18c7bba2..e03648114b367 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java @@ -64,7 +64,7 @@ public static ClusterState executeHandlingR final var taskContexts = StreamSupport.stream(tasks.spliterator(), false).>map( TestTaskContext::new ).toList(); - final var resultingState = executor.execute(originalState, taskContexts); + final var resultingState = executor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(originalState, taskContexts)); assertNotNull(resultingState); for (final var taskContext : taskContexts) { final var testTaskContext = (TestTaskContext) taskContext; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index 4e0a336fa241f..59637fde08f6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.core.XPackPlugin; import java.time.Clock; -import java.util.List; import java.util.Map; import java.util.UUID; @@ -128,18 +127,19 @@ public String getDescription() { static class Executor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - XPackPlugin.checkReadyForXPackCustomMetadata(currentState); - final LicensesMetadata originalLicensesMetadata = currentState.metadata().custom(LicensesMetadata.TYPE); + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final var initialState = batchExecutionContext.initialState(); + XPackPlugin.checkReadyForXPackCustomMetadata(initialState); + final LicensesMetadata originalLicensesMetadata = initialState.metadata().custom(LicensesMetadata.TYPE); var currentLicensesMetadata = originalLicensesMetadata; - for (final var taskContext : taskContexts) { - currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, currentState.nodes(), taskContext); + for (final var taskContext : batchExecutionContext.taskContexts()) { + currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, initialState.nodes(), taskContext); } if (currentLicensesMetadata == originalLicensesMetadata) { - return currentState; + return initialState; } else { - return ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.metadata()).putCustom(LicensesMetadata.TYPE, currentLicensesMetadata)) + return ClusterState.builder(initialState) + .metadata(Metadata.builder(initialState.metadata()).putCustom(LicensesMetadata.TYPE, currentLicensesMetadata)) .build(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 895e12e4754a3..4579218677d2c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -19,7 +19,6 @@ import java.time.Clock; import java.util.Collections; -import java.util.List; import java.util.Map; import 
java.util.UUID; @@ -115,18 +114,19 @@ public void onFailure(@Nullable Exception e) { static class Executor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - XPackPlugin.checkReadyForXPackCustomMetadata(currentState); - final LicensesMetadata originalLicensesMetadata = currentState.metadata().custom(LicensesMetadata.TYPE); + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final var initialState = batchExecutionContext.initialState(); + XPackPlugin.checkReadyForXPackCustomMetadata(initialState); + final LicensesMetadata originalLicensesMetadata = initialState.metadata().custom(LicensesMetadata.TYPE); var currentLicensesMetadata = originalLicensesMetadata; - for (final var taskContext : taskContexts) { - currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, currentState.nodes(), taskContext); + for (final var taskContext : batchExecutionContext.taskContexts()) { + currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, initialState.nodes(), taskContext); } if (currentLicensesMetadata == originalLicensesMetadata) { - return currentState; + return initialState; } else { - return ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.metadata()).putCustom(LicensesMetadata.TYPE, currentLicensesMetadata)) + return ClusterState.builder(initialState) + .metadata(Metadata.builder(initialState.metadata()).putCustom(LicensesMetadata.TYPE, currentLicensesMetadata)) .build(); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java index 16a932e127210..6a436f37749c2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java @@ -211,7 +211,8 @@ public void testStartBasicStartsNewLicenseIfFieldsDifferent() throws Exception { m -> m.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(oldLicense, null)) ); - ClusterState updatedState = taskExecutorCaptor.getValue().execute(oldState, List.of(taskContext)); + ClusterState updatedState = taskExecutorCaptor.getValue() + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(oldState, List.of(taskContext))); // Pass updated state to listener to trigger onResponse call to wrapped `future` listenerCaptor.getValue().run(); assertion.accept(future); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index 086c7c37131ef..0ae0701e8bd44 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -39,7 +39,6 @@ import java.util.Collections; import java.util.HashSet; -import java.util.List; import java.util.Locale; import java.util.Objects; import java.util.Set; @@ -61,13 +60,15 @@ class IndexLifecycleRunner { new ClusterStateTaskExecutor<>() { @Override @SuppressForbidden(reason = "consuming published cluster state for legacy reasons") - public ClusterState execute(ClusterState currentState, List> taskContexts) { - ClusterState state = currentState; - for (final var taskContext : taskContexts) { + public ClusterState 
execute(BatchExecutionContext batchExecutionContext) { + ClusterState state = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); state = task.execute(state); - taskContext.success(new ClusterStateTaskExecutor.LegacyClusterTaskResultActionListener(task, currentState)); + taskContext.success( + new ClusterStateTaskExecutor.LegacyClusterTaskResultActionListener(task, batchExecutionContext.initialState()) + ); } catch (Exception e) { taskContext.onFailure(e); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java index b9e25163c8cb8..e12401282d699 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java @@ -268,7 +268,7 @@ public void onFailure(Exception failure) { } }; - task.execute(state, List.of(context)); + task.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(context))); return null; }).when(clusterService).submitStateUpdateTask(anyString(), any(), any(), any()); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java index 3268c4d323e7a..0a67a6f500521 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java @@ -91,11 +91,9 @@ public class TransportRollupAction extends AcknowledgedTransportMasterNodeAction /** * This is the cluster state task executor for cluster state update actions. 
*/ - private static final ClusterStateTaskExecutor STATE_UPDATE_TASK_EXECUTOR = ( - currentState, - taskContexts) -> { - ClusterState state = currentState; - for (final var taskContext : taskContexts) { + private static final ClusterStateTaskExecutor STATE_UPDATE_TASK_EXECUTOR = batchExecutionContext -> { + ClusterState state = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); state = task.execute(state); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java index bb68fa3f081e1..68de9c2a51598 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java @@ -34,7 +34,6 @@ import org.elasticsearch.xpack.shutdown.DeleteShutdownNodeAction.Request; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.elasticsearch.cluster.metadata.NodesShutdownMetadata.getShutdownsOrEmpty; @@ -80,10 +79,10 @@ public void onFailure(Exception e) { // package private for tests class DeleteShutdownNodeExecutor implements ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - var shutdownMetadata = new HashMap<>(getShutdownsOrEmpty(currentState).getAllNodeMetadataMap()); + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + var shutdownMetadata = new HashMap<>(getShutdownsOrEmpty(batchExecutionContext.initialState()).getAllNodeMetadataMap()); boolean changed = false; - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { var request = taskContext.getTask().request(); try { changed |= deleteShutdownNodeState(shutdownMetadata, request); @@ -95,11 +94,11 @@ public ClusterState execute(ClusterState currentState, List ackAndReroute(request, taskContext.getTask().listener(), reroute)); } if (changed == false) { - return currentState; + return batchExecutionContext.initialState(); } - return ClusterState.builder(currentState) + return ClusterState.builder(batchExecutionContext.initialState()) .metadata( - Metadata.builder(currentState.metadata()) + Metadata.builder(batchExecutionContext.initialState().metadata()) .putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) ) .build(); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java index e588c49420051..812f020131d43 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.xpack.shutdown.PutShutdownNodeAction.Request; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.Predicate; @@ -117,11 +116,12 @@ public void onFailure(Exception e) { // package private for tests class PutShutdownNodeExecutor implements 
ClusterStateTaskExecutor { @Override - public ClusterState execute(ClusterState currentState, List> taskContexts) throws Exception { - var shutdownMetadata = new HashMap<>(getShutdownsOrEmpty(currentState).getAllNodeMetadataMap()); - Predicate nodeExistsPredicate = currentState.getNodes()::nodeExists; + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final var initialState = batchExecutionContext.initialState(); + var shutdownMetadata = new HashMap<>(getShutdownsOrEmpty(initialState).getAllNodeMetadataMap()); + Predicate nodeExistsPredicate = batchExecutionContext.initialState().getNodes()::nodeExists; boolean changed = false; - for (final var taskContext : taskContexts) { + for (final var taskContext : batchExecutionContext.taskContexts()) { var request = taskContext.getTask().request(); try { changed |= putShutdownNodeState(shutdownMetadata, nodeExistsPredicate, request); @@ -133,11 +133,11 @@ public ClusterState execute(ClusterState currentState, List ackAndMaybeReroute(request, taskContext.getTask().listener(), reroute)); } if (changed == false) { - return currentState; + return batchExecutionContext.initialState(); } - return ClusterState.builder(currentState) + return ClusterState.builder(batchExecutionContext.initialState()) .metadata( - Metadata.builder(currentState.metadata()) + Metadata.builder(batchExecutionContext.initialState().metadata()) .putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) ) .build(); diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java index 3aecf5db24032..2df7b2ead0849 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskExecutor.TaskContext; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -76,7 +77,8 @@ public void testNoop() throws Exception { var taskExecutor = ArgumentCaptor.forClass(DeleteShutdownNodeExecutor.class); verify(clusterService).submitStateUpdateTask(any(), updateTask.capture(), taskConfig.capture(), taskExecutor.capture()); when(taskContext.getTask()).thenReturn(updateTask.getValue()); - ClusterState gotState = taskExecutor.getValue().execute(ClusterState.EMPTY_STATE, List.of(taskContext)); + ClusterState gotState = taskExecutor.getValue() + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(ClusterState.EMPTY_STATE, List.of(taskContext))); assertThat(gotState, sameInstance(ClusterState.EMPTY_STATE)); } } diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java index 61948cf4b4a5f..39a70853a7405 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java +++ 
b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskExecutor.TaskContext; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type; @@ -74,7 +75,8 @@ public void testNoop() throws Exception { var taskExecutor = ArgumentCaptor.forClass(PutShutdownNodeExecutor.class); verify(clusterService).submitStateUpdateTask(any(), updateTask.capture(), taskConfig.capture(), taskExecutor.capture()); when(taskContext.getTask()).thenReturn(updateTask.getValue()); - ClusterState stableState = taskExecutor.getValue().execute(ClusterState.EMPTY_STATE, List.of(taskContext)); + ClusterState stableState = taskExecutor.getValue() + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(ClusterState.EMPTY_STATE, List.of(taskContext))); // run the request again, there should be no call to submit an update task clearInvocations(clusterService); @@ -85,7 +87,8 @@ public void testNoop() throws Exception { action.masterOperation(null, request, ClusterState.EMPTY_STATE, ActionListener.noop()); verify(clusterService).submitStateUpdateTask(any(), updateTask.capture(), taskConfig.capture(), taskExecutor.capture()); when(taskContext.getTask()).thenReturn(updateTask.getValue()); - ClusterState gotState = taskExecutor.getValue().execute(stableState, List.of(taskContext)); + ClusterState gotState = taskExecutor.getValue() + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(stableState, List.of(taskContext))); assertThat(gotState, sameInstance(stableState)); } } From 104ad7fd9200d4c2507b9993f66d263fbbb341c0 Mon Sep 17 00:00:00 2001 From: weizijun Date: Mon, 15 Aug 2022 16:46:09 +0800 Subject: [PATCH 193/265] TSDB: fix time series field caps bwc yaml test (#89236) Stops the repeated test failures due to #89171 --- .../rest-api-spec/test/field_caps/40_time_series.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml index 345939c44883a..9f5af66542f56 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/40_time_series.yml @@ -1,8 +1,8 @@ --- setup: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: indices.create: @@ -110,8 +110,8 @@ setup: "Get simple time series field caps": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: field_caps: @@ -172,8 +172,8 @@ setup: "Get time series field caps with conflicts": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.0.99" + reason: introduced in 8.1.0 - do: field_caps: From 51f89f43e56b68cd41449c63a8eea0c50b5c37aa Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 15 Aug 2022 10:09:14 +0100 Subject: [PATCH 194/265] Handle rejection in LeaderChecker (#89326) Closes #89325 --- .../cluster/coordination/LeaderChecker.java | 20 +++++++++++++++++-- 
.../transport/TransportService.java | 2 +- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java index 7164155c62acc..0b7e5842d8fc1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java @@ -17,6 +17,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; @@ -331,12 +333,26 @@ public void handleException(TransportException exp) { void leaderFailed(Supplier messageSupplier, Exception e) { if (isClosed.compareAndSet(false, true)) { - transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION).execute(new Runnable() { + transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION).execute(new AbstractRunnable() { @Override - public void run() { + protected void doRun() { leaderFailureListener.onLeaderFailure(messageSupplier, e); } + @Override + public void onRejection(Exception e2) { + e.addSuppressed(e2); + logger.debug("rejected execution of onLeaderFailure", e); + assert e2 instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e; + } + + @Override + public void onFailure(Exception e2) { + e2.addSuppressed(e); + logger.error("failed execution of onLeaderFailure", e2); + assert false : e2; + } + @Override public String toString() { return "notification of leader failure: " + e.getMessage(); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 71d225b8c87d5..e935bb1e1578e 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -796,7 +796,7 @@ private static void handleSendRequestException( // should not happen innerException.addSuppressed(transportException); logger.error("unexpected exception from handler.handleException", innerException); - // assert false : innerException; TODO AwaitsFix https://github.com/elastic/elasticsearch/issues/89325 + assert false : innerException; } } From 621c38cde5e0795d2ab96dca9618f251a6d8e354 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 15 Aug 2022 10:42:50 +0100 Subject: [PATCH 195/265] Report better error for GCS credentials load failure (#89336) Today if the GCS credentials file setting is invalid we report some kind of JSON parsing error but it's not clear what JSON is being parsed so the error is hard to track down. This commit adds the problematic setting name to the exception message. 
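For illustration only, the error-wrapping pattern this change applies looks roughly like the stand-alone sketch below. The `loadCredentials` helper and the hard-coded setting key are invented for the example; the real code (in the diff that follows) wraps `ServiceAccountCredentials.fromStream` and derives the key from the per-client `credentials_file` secure setting.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    // Sketch only: a made-up helper showing how the low-level failure is re-thrown
    // with the offending setting name attached, so operators know what to fix.
    class GcsCredentialsExample {
        static byte[] loadCredentials(String settingKey, InputStream credStream) {
            try {
                return credStream.readAllBytes();
            } catch (IOException e) {
                // include the setting key in the message instead of surfacing a bare parse error
                throw new IllegalArgumentException("failed to load GCS client credentials from [" + settingKey + "]", e);
            }
        }

        public static void main(String[] args) {
            byte[] creds = loadCredentials(
                "gcs.client.default.credentials_file",
                new ByteArrayInputStream("not-json".getBytes(StandardCharsets.UTF_8))
            );
            System.out.println("loaded " + creds.length + " bytes");
        }
    }
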
--- docs/changelog/89336.yaml | 5 +++++ .../gcs/GoogleCloudStorageClientSettings.java | 11 +++++----- ...GoogleCloudStorageClientSettingsTests.java | 20 +++++++++++++++++++ 3 files changed, 30 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/89336.yaml diff --git a/docs/changelog/89336.yaml b/docs/changelog/89336.yaml new file mode 100644 index 0000000000000..4dde7e4545c47 --- /dev/null +++ b/docs/changelog/89336.yaml @@ -0,0 +1,5 @@ +pr: 89336 +summary: Report better error for GCS credentials load failure +area: Snapshot/Restore +type: bug +issues: [] diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java index 7a510e8215bbb..e52fa91a61ade 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java @@ -18,9 +18,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import java.io.IOException; import java.io.InputStream; -import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -246,13 +244,14 @@ static GoogleCloudStorageClientSettings getClientSettings(final Settings setting * {@code null} if no service account is defined. */ static ServiceAccountCredentials loadCredential(final Settings settings, final String clientName) { + final var credentialsFileSetting = CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName); try { - if (CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).exists(settings) == false) { + if (credentialsFileSetting.exists(settings) == false) { // explicitly returning null here so that the default credential // can be loaded later when creating the Storage client return null; } - try (InputStream credStream = CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).get(settings)) { + try (InputStream credStream = credentialsFileSetting.get(settings)) { final Collection scopes = Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL); return SocketAccess.doPrivilegedIOException(() -> { final ServiceAccountCredentials credentials = ServiceAccountCredentials.fromStream(credStream); @@ -262,8 +261,8 @@ static ServiceAccountCredentials loadCredential(final Settings settings, final S return credentials; }); } - } catch (final IOException e) { - throw new UncheckedIOException(e); + } catch (final Exception e) { + throw new IllegalArgumentException("failed to load GCS client credentials from [" + credentialsFileSetting.getKey() + "]", e); } } diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index 0ce4932ad4edd..d2813ff3c56c8 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -43,6 +43,7 @@ import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING; import static 
org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.getClientSettings; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.loadCredential; +import static org.hamcrest.Matchers.equalTo; public class GoogleCloudStorageClientSettingsTests extends ESTestCase { @@ -89,6 +90,25 @@ public void testLoadCredential() throws Exception { assertGoogleCredential(expectedClientSettings.getCredential(), loadCredential(randomClient.v2(), clientName)); } + public void testLoadInvalidCredential() throws Exception { + final List> deprecationWarnings = new ArrayList<>(); + final Settings.Builder settings = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientName = randomBoolean() ? "default" : randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + randomClient(clientName, settings, secureSettings, deprecationWarnings); + secureSettings.setFile( + CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + "invalid".getBytes(StandardCharsets.UTF_8) + ); + assertThat( + expectThrows( + IllegalArgumentException.class, + () -> loadCredential(settings.setSecureSettings(secureSettings).build(), clientName) + ).getMessage(), + equalTo("failed to load GCS client credentials from [gcs.client." + clientName + ".credentials_file]") + ); + } + public void testProjectIdDefaultsToCredentials() throws Exception { final String clientName = randomAlphaOfLength(5); final Tuple credentials = randomCredential(clientName); From 745947e8546f25186fe64470a9512daa04c45bb7 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 15 Aug 2022 10:50:15 +0100 Subject: [PATCH 196/265] Capture deprecation warnings in batched master tasks (#85525) It's possible for a cluster state update task to emit deprecation warnings, but if the task is executed in a batch then these warnings will be exposed to the listener for every item in the batch. With this commit we introduce a mechanism for tasks to capture just the warnings relevant to them, along with assertions that warnings are not inadvertently leaked back to the master service. 
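To make the intended usage concrete, an executor written against the new API looks roughly like the sketch below. `MyTask` (with an `execute(ClusterState)` method) and the `rerouteIfNeeded` helper are hypothetical; `captureResponseHeaders()`, `dropHeadersContext()`, `taskContexts()` and `initialState()` are the pieces this change actually adds or relies on.

    // Sketch only: MyTask and rerouteIfNeeded are invented for the example.
    ClusterStateTaskExecutor<MyTask> executor = batchExecutionContext -> {
        ClusterState state = batchExecutionContext.initialState();
        for (final var taskContext : batchExecutionContext.taskContexts()) {
            // response headers (e.g. deprecation warnings) emitted in this block are captured
            // and later replayed only to this task's own listener, not to every task in the batch
            try (var ignored = taskContext.captureResponseHeaders()) {
                state = taskContext.getTask().execute(state);
            }
            taskContext.success(() -> {});
        }
        // batch-wide work (such as a final reroute) belongs to no single task,
        // so any warnings it emits are deliberately dropped
        try (var ignored = batchExecutionContext.dropHeadersContext()) {
            state = rerouteIfNeeded(state);
        }
        return state;
    };

An executor that does neither and lets response headers reach the master service now trips an assertion in `MasterService#innerExecuteTasks`.
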
Closes #85506 --- docs/changelog/85525.yaml | 6 + .../UpdateTimeSeriesRangeService.java | 5 +- .../action/support/AutoCreateIndexIT.java | 111 +++++++++++++ .../TransportUpdateDesiredNodesAction.java | 2 +- .../indices/create/AutoCreateAction.java | 2 +- .../rollover/TransportRolloverAction.java | 6 +- .../cluster/ClusterStateTaskExecutor.java | 28 +++- .../cluster/LocalMasterServiceTask.java | 8 +- .../action/shard/ShardStateAction.java | 3 +- .../NodeRemovalClusterStateTaskExecutor.java | 20 +-- .../metadata/MetadataIndexStateService.java | 10 +- .../MetadataIndexTemplateService.java | 4 +- .../metadata/MetadataMappingService.java | 2 +- .../MetadataUpdateSettingsService.java | 8 +- .../cluster/service/MasterService.java | 151 ++++++++++++++---- .../metadata/HealthMetadataService.java | 4 +- .../elasticsearch/ingest/IngestService.java | 4 +- .../ReservedStateErrorTaskExecutor.java | 4 +- .../ReservedStateUpdateTaskExecutor.java | 4 +- .../cluster/service/MasterServiceTests.java | 118 +++++++++++++- .../ReservedClusterStateServiceTests.java | 19 ++- .../ClusterStateTaskExecutorUtils.java | 10 +- .../license/StartBasicClusterTask.java | 4 +- .../license/StartTrialClusterTask.java | 4 +- .../license/LicenseServiceTests.java | 2 +- .../xpack/ilm/IndexLifecycleRunner.java | 4 +- .../ReservedLifecycleStateServiceTests.java | 8 +- .../rollup/v2/TransportRollupAction.java | 4 +- .../TransportDeleteShutdownNodeAction.java | 2 +- .../TransportPutShutdownNodeAction.java | 2 +- ...ransportDeleteShutdownNodeActionTests.java | 2 +- .../TransportPutShutdownNodeActionTests.java | 4 +- 32 files changed, 483 insertions(+), 82 deletions(-) create mode 100644 docs/changelog/85525.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/action/support/AutoCreateIndexIT.java diff --git a/docs/changelog/85525.yaml b/docs/changelog/85525.yaml new file mode 100644 index 0000000000000..18a57f8477180 --- /dev/null +++ b/docs/changelog/85525.yaml @@ -0,0 +1,6 @@ +pr: 85525 +summary: Capture deprecation warnings in batched master tasks +area: Cluster Coordination +type: bug +issues: + - 85506 diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java index 00096643db1a0..abd6ba93d3d16 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java @@ -200,7 +200,10 @@ public void onFailure(Exception e) { private class UpdateTimeSeriesExecutor implements ClusterStateTaskExecutor { @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { - var result = updateTimeSeriesTemporalRange(batchExecutionContext.initialState(), Instant.now()); + final ClusterState result; + try (var ignored = batchExecutionContext.dropHeadersContext()) { + result = updateTimeSeriesTemporalRange(batchExecutionContext.initialState(), Instant.now()); + } for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> taskContext.getTask().listener().accept(null)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/AutoCreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/AutoCreateIndexIT.java new file mode 100644 index 0000000000000..d0e151d506341 --- /dev/null +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/action/support/AutoCreateIndexIT.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentType; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.not; + +public class AutoCreateIndexIT extends ESIntegTestCase { + public void testBatchingWithDeprecationWarnings() throws Exception { + final var masterNodeClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var barrier = new CyclicBarrier(2); + masterNodeClusterService.submitStateUpdateTask( + "block", + e -> { assert false : e; }, + ClusterStateTaskConfig.build(Priority.NORMAL), + batchExecutionContext -> { + barrier.await(10, TimeUnit.SECONDS); + barrier.await(10, TimeUnit.SECONDS); + batchExecutionContext.taskContexts().forEach(c -> c.success(() -> {})); + return batchExecutionContext.initialState(); + } + ); + + barrier.await(10, TimeUnit.SECONDS); + + final var countDownLatch = new CountDownLatch(2); + + final var client = client(); + client.prepareIndex("no-dot").setSource("{}", XContentType.JSON).execute(new ActionListener<>() { + @Override + public void onResponse(IndexResponse indexResponse) { + try { + final var warningHeaders = client.threadPool().getThreadContext().getResponseHeaders().get("Warning"); + if (warningHeaders != null) { + assertThat( + warningHeaders, + not( + hasItems( + containsString("index names starting with a dot are reserved for hidden indices and system indices") + ) + ) + ); + } + } finally { + countDownLatch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + countDownLatch.countDown(); + assert false : e; + } + }); + + client.prepareIndex(".has-dot").setSource("{}", XContentType.JSON).execute(new ActionListener<>() { + @Override + public void onResponse(IndexResponse indexResponse) { + try { + final var warningHeaders = client.threadPool().getThreadContext().getResponseHeaders().get("Warning"); + assertNotNull(warningHeaders); + assertThat( + warningHeaders, + hasItems(containsString("index names starting with a dot are reserved for hidden indices and system indices")) + ); + } finally { + countDownLatch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + countDownLatch.countDown(); + assert false : e; + } + }); + + assertBusy( + () -> assertThat( + masterNodeClusterService.getMasterService() + .pendingTasks() + .stream() + .map(pendingClusterTask -> pendingClusterTask.getSource().string()) + .toList(), + hasItems("auto create [no-dot]", "auto create [.has-dot]") + ) + ); + + barrier.await(10, TimeUnit.SECONDS); + 
assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index ff84146e6a74e..9ee56e9ba8fa9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -192,7 +192,7 @@ public ClusterState execute(BatchExecutionContext batchE continue; } final var previousDesiredNodes = desiredNodes; - try { + try (var ignored = taskContext.captureResponseHeaders()) { desiredNodes = updateDesiredNodes(desiredNodes, request); } catch (Exception e) { taskContext.onFailure(e); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 3a85c7e02c7a1..94611ea9ec9ed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -113,7 +113,7 @@ public TransportAction( ClusterState state = batchExecutionContext.initialState(); for (final var taskContext : taskContexts) { final var task = taskContext.getTask(); - try { + try (var ignored = taskContext.captureResponseHeaders()) { state = task.execute(state, successfulRequests, taskContext); assert successfulRequests.containsKey(task.request); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 709aa841ad3d1..cd11ecfbd30d3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -263,7 +263,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionCo final var results = new ArrayList(batchExecutionContext.taskContexts().size()); var state = batchExecutionContext.initialState(); for (final var taskContext : batchExecutionContext.taskContexts()) { - try { + try (var ignored = taskContext.captureResponseHeaders()) { state = executeTask(state, results, taskContext); } catch (Exception e) { taskContext.onFailure(e); @@ -280,7 +280,9 @@ public ClusterState execute(BatchExecutionContext batchExecutionCo 1024, reason ); - state = allocationService.reroute(state, reason.toString()); + try (var ignored = batchExecutionContext.dropHeadersContext()) { + state = allocationService.reroute(state, reason.toString()); + } } return state; } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index 32419699fb7eb..69a54e5068a5b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -8,9 +8,11 @@ package org.elasticsearch.cluster; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Releasable; import java.util.List; import java.util.function.Consumer; +import 
java.util.function.Supplier; /** * An executor for batches of cluster state update tasks. @@ -52,8 +54,8 @@ default void clusterStatePublished(ClusterState newClusterState) {} /** * Builds a concise description of a list of tasks (to be used in logging etc.). * - * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(BatchExecutionContext)}. - * but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution. + * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute} but are guaranteed to be a + * subset of them. This method can be called multiple times with different lists before execution. * * @param tasks the tasks to describe. * @return A string which describes the batch of tasks. @@ -200,6 +202,11 @@ default void success(ClusterStateAckListener clusterStateAckListener) { * @param failure The exception with which the task failed. */ void onFailure(Exception failure); + + /** + * Creates a context which captures any response headers (e.g. deprecation warnings) to be fed to the task's listener on completion. + */ + Releasable captureResponseHeaders(); } /** @@ -207,6 +214,21 @@ default void success(ClusterStateAckListener clusterStateAckListener) { * * @param initialState The initial cluster state on which the tasks should be executed. * @param taskContexts A {@link TaskContext} for each task in the batch. Implementations must complete every context in the list. + * @param dropHeadersContextSupplier Supplies a context (a resource for use in a try-with-resources block) which captures and drops any + * emitted response headers, for cases where things like deprecation warnings may be emitted but + * cannot be associated with any specific task. */ - record BatchExecutionContext (ClusterState initialState, List> taskContexts) {} + record BatchExecutionContext ( + ClusterState initialState, + List> taskContexts, + Supplier dropHeadersContextSupplier + ) { + /** + * Creates a context (a resource for use in a try-with-resources block) which captures and drops any emitted response headers, for + * cases where things like deprecation warnings may be emitted but cannot be associated with any specific task. 
+ */ + public Releasable dropHeadersContext() { + return dropHeadersContextSupplier.get(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java index 78d546781d6c8..033f6d1f8dd39 100644 --- a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java @@ -23,7 +23,7 @@ public LocalMasterServiceTask(Priority priority) { this.priority = priority; } - protected void execute(ClusterState currentState) throws Exception {} + protected void execute(ClusterState currentState) {} @Override public final void clusterStateProcessed(ClusterState oldState, ClusterState newState) { @@ -52,12 +52,14 @@ public String describeTasks(List tasks) { } @Override - public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + public ClusterState execute(BatchExecutionContext batchExecutionContext) { final var thisTask = LocalMasterServiceTask.this; final var taskContexts = batchExecutionContext.taskContexts(); assert taskContexts.size() == 1 && taskContexts.get(0).getTask() == thisTask : "expected one-element task list containing current object but was " + taskContexts; - thisTask.execute(batchExecutionContext.initialState()); + try (var ignored = taskContexts.get(0).captureResponseHeaders()) { + thisTask.execute(batchExecutionContext.initialState()); + } taskContexts.get(0).success(() -> onPublicationComplete()); return batchExecutionContext.initialState(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 1d0fc67737e16..200a2024a610b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -405,7 +405,8 @@ public ClusterState execute(BatchExecutionContext batchEx assert tasksToBeApplied.size() == failedShardsToBeApplied.size() + staleShardsToBeApplied.size(); ClusterState maybeUpdatedState = initialState; - try { + try (var ignored = batchExecutionContext.dropHeadersContext()) { + // drop deprecation warnings arising from the computation (reroute etc). maybeUpdatedState = applyFailedShards(initialState, failedShardsToBeApplied, staleShardsToBeApplied); for (final var taskContext : tasksToBeApplied) { taskContext.success(() -> taskContext.getTask().listener().onResponse(TransportResponse.Empty.INSTANCE)); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index a44acfd3f2375..af59b6a065109 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -66,22 +66,22 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) t taskContext.success(task.onClusterStateProcessed::run); } - final ClusterState finalState; + if (removed == false) { + // no nodes to remove, keep the current cluster state + return initialState; + } + + try (var ignored = batchExecutionContext.dropHeadersContext()) { + // suppress deprecation warnings e.g. 
from reroute() - if (removed) { - final ClusterState remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder); - final ClusterState ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); - finalState = allocationService.disassociateDeadNodes( + final var remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder); + final var ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); + return allocationService.disassociateDeadNodes( ptasksDisassociatedState, true, describeTasks(batchExecutionContext.taskContexts().stream().map(TaskContext::getTask).toList()) ); - } else { - // no nodes to remove, keep the current cluster state - finalState = initialState; } - - return finalState; } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index 36a8d86f1e6c3..1243d9bb26d19 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -279,7 +279,10 @@ public ClusterState execute(BatchExecutionContext batchExecuti } } - return allocationService.reroute(state, "indices closed"); + try (var ignored = batchExecutionContext.dropHeadersContext()) { + // reroute may encounter deprecated features but the resulting warnings are not associated with any particular task + return allocationService.reroute(state, "indices closed"); + } } } @@ -1103,7 +1106,10 @@ private class OpenIndicesExecutor implements ClusterStateTaskExecutor batchExecutionContext) { ClusterState state = batchExecutionContext.initialState(); - try { + try (var ignored = batchExecutionContext.dropHeadersContext()) { + // we may encounter deprecated settings but they are not directly related to opening the indices, nor are they really + // associated with any particular tasks, so we drop them + // build an in-order de-duplicated array of all the indices to open final Set indicesToOpen = Sets.newLinkedHashSetWithExpectedSize(batchExecutionContext.taskContexts().size()); for (final var taskContext : batchExecutionContext.taskContexts()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index ff3e5771477b9..3dc021cea6daa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -130,7 +130,9 @@ public class MetadataIndexTemplateService { for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); - state = task.execute(state); + try (var ignored = taskContext.captureResponseHeaders()) { + state = task.execute(state); + } taskContext.success(() -> task.listener.onResponse(AcknowledgedResponse.TRUE)); } catch (Exception e) { taskContext.onFailure(e); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java index b8304110031dd..de97a8b5b2901 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java @@ -101,7 +101,7 @@ public ClusterState execute(BatchExecutionContext ((UpdateTask) task).onFailure(new ProcessClusterEventTimeoutException(timeout, task.source))); + .execute(() -> ((UpdateTask) task).onFailure(new ProcessClusterEventTimeoutException(timeout, task.source), () -> {})); } @Override @@ -188,8 +193,9 @@ public String describeTasks(List tasks) { ); } - public void onFailure(Exception e) { + public void onFailure(Exception e, Runnable restoreResponseHeaders) { try (ThreadContext.StoredContext ignore = threadContextSupplier.get()) { + restoreResponseHeaders.run(); listener.onFailure(e); } catch (Exception inner) { inner.addSuppressed(e); @@ -198,10 +204,21 @@ public void onFailure(Exception e) { } @Nullable - public ContextPreservingAckListener wrapInTaskContext(@Nullable ClusterStateAckListener clusterStateAckListener) { + public ContextPreservingAckListener wrapInTaskContext( + @Nullable ClusterStateAckListener clusterStateAckListener, + Runnable restoreResponseHeaders + ) { return clusterStateAckListener == null ? null - : new ContextPreservingAckListener(Objects.requireNonNull(clusterStateAckListener), threadContextSupplier); + : new ContextPreservingAckListener( + Objects.requireNonNull(clusterStateAckListener), + threadContextSupplier, + restoreResponseHeaders + ); + } + + ThreadContext getThreadContext() { + return threadPool.getThreadContext(); } } } @@ -250,7 +267,7 @@ private void runTasks( if (previousClusterState.nodes().isLocalNodeElectedMaster() == false && executor.runOnlyOnMaster()) { logger.debug("failing [{}]: local node is no longer master", summary); - updateTasks.forEach(t -> t.onFailure(new NotMasterException("no longer master, failing [" + t.source() + "]"))); + updateTasks.forEach(t -> t.onFailure(new NotMasterException("no longer master, failing [" + t.source() + "]"), () -> {})); return; } @@ -258,12 +275,12 @@ private void runTasks( final var executionResults = updateTasks.stream().map(ExecutionResult::new).toList(); final var newClusterState = patchVersions( previousClusterState, - executeTasks(previousClusterState, executionResults, executor, summary) + executeTasks(previousClusterState, executionResults, executor, summary, threadPool.getThreadContext()) ); // fail all tasks that have failed for (final var executionResult : executionResults) { if (executionResult.failure != null) { - executionResult.updateTask.onFailure(executionResult.failure); + executionResult.updateTask.onFailure(executionResult.failure, executionResult::restoreResponseHeaders); } } final TimeValue computationTime = getTimeSince(computationStartTime); @@ -529,7 +546,10 @@ public ClusterState execute(BatchExecutionContext batchE : "this only supports a single task but received " + batchExecutionContext.taskContexts(); final var taskContext = batchExecutionContext.taskContexts().get(0); final var task = taskContext.getTask(); - final var newState = task.execute(batchExecutionContext.initialState()); + final ClusterState newState; + try (var ignored = taskContext.captureResponseHeaders()) { + newState = task.execute(batchExecutionContext.initialState()); + } final Consumer publishListener = publishedState -> task.clusterStateProcessed( batchExecutionContext.initialState(), publishedState @@ -644,7 +664,11 @@ private void logExecutionTime(TimeValue executionTime, String activity, BatchSum * callbacks, and also 
logs and swallows any exceptions thrown. One of these is created for each task in the batch that passes a * {@link ClusterStateAckListener} to {@link ClusterStateTaskExecutor.TaskContext#success}. */ - private record ContextPreservingAckListener(ClusterStateAckListener listener, Supplier context) { + private record ContextPreservingAckListener( + ClusterStateAckListener listener, + Supplier context, + Runnable restoreResponseHeaders + ) { public boolean mustAck(DiscoveryNode discoveryNode) { return listener.mustAck(discoveryNode); @@ -652,6 +676,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { public void onAckSuccess() { try (ThreadContext.StoredContext ignore = context.get()) { + restoreResponseHeaders.run(); listener.onAllNodesAcked(); } catch (Exception inner) { logger.error("exception thrown by listener while notifying on all nodes acked", inner); @@ -660,6 +685,7 @@ public void onAckSuccess() { public void onAckFailure(@Nullable Exception e) { try (ThreadContext.StoredContext ignore = context.get()) { + restoreResponseHeaders.run(); listener.onAckFailure(e); } catch (Exception inner) { inner.addSuppressed(e); @@ -669,6 +695,7 @@ public void onAckFailure(@Nullable Exception e) { public void onAckTimeout() { try (ThreadContext.StoredContext ignore = context.get()) { + restoreResponseHeaders.run(); listener.onAckTimeout(); } catch (Exception e) { logger.error("exception thrown by listener while notifying on ack timeout", e); @@ -807,6 +834,9 @@ private static class ExecutionResult impleme @Nullable // if the task is incomplete or succeeded Exception failure; + @Nullable + Map> responseHeaders; + ExecutionResult(Batcher.UpdateTask updateTask) { this.updateTask = updateTask; } @@ -877,6 +907,40 @@ public void onFailure(Exception failure) { this.failure = Objects.requireNonNull(failure); } + @Override + public Releasable captureResponseHeaders() { + final var threadContext = updateTask.getThreadContext(); + final var storedContext = threadContext.newStoredContext(false); + return Releasables.wrap(() -> { + final var newResponseHeaders = threadContext.getResponseHeaders(); + if (newResponseHeaders.isEmpty()) { + return; + } + if (responseHeaders == null) { + responseHeaders = new HashMap<>(newResponseHeaders); + } else { + for (final var newResponseHeader : newResponseHeaders.entrySet()) { + responseHeaders.compute(newResponseHeader.getKey(), (ignored, oldValue) -> { + if (oldValue == null) { + return newResponseHeader.getValue(); + } + return CollectionUtils.concatLists(oldValue, newResponseHeader.getValue()); + }); + } + } + }, storedContext); + } + + private void restoreResponseHeaders() { + if (responseHeaders != null) { + for (final var responseHeader : responseHeaders.entrySet()) { + for (final var value : responseHeader.getValue()) { + updateTask.getThreadContext().addResponseHeader(responseHeader.getKey(), value); + } + } + } + } + void onBatchFailure(Exception failure) { // if the whole batch resulted in an exception then this overrides any task-level results whether successful or not this.failure = Objects.requireNonNull(failure); @@ -890,6 +954,7 @@ void onPublishSuccess(ClusterState newClusterState) { return; } try (ThreadContext.StoredContext ignored = updateTask.threadContextSupplier.get()) { + restoreResponseHeaders(); if (onPublicationSuccess == null) { publishedStateConsumer.accept(newClusterState); } else { @@ -906,6 +971,7 @@ void onClusterStateUnchanged(ClusterState clusterState) { return; } try (ThreadContext.StoredContext ignored = 
updateTask.threadContextSupplier.get()) { + restoreResponseHeaders(); if (onPublicationSuccess == null) { publishedStateConsumer.accept(clusterState); } else { @@ -922,6 +988,7 @@ void onPublishFailure(FailedToCommitClusterStateException e) { return; } try (ThreadContext.StoredContext ignored = updateTask.threadContextSupplier.get()) { + restoreResponseHeaders(); getTask().onFailure(e); } catch (Exception inner) { inner.addSuppressed(e); @@ -931,7 +998,7 @@ void onPublishFailure(FailedToCommitClusterStateException e) { ContextPreservingAckListener getContextPreservingAckListener() { assert incomplete() == false; - return updateTask.wrapInTaskContext(clusterStateAckListener); + return updateTask.wrapInTaskContext(clusterStateAckListener, this::restoreResponseHeaders); } @Override @@ -944,9 +1011,10 @@ private static ClusterState executeTasks( ClusterState previousClusterState, List> executionResults, ClusterStateTaskExecutor executor, - BatchSummary summary + BatchSummary summary, + ThreadContext threadContext ) { - final var resultingState = innerExecuteTasks(previousClusterState, executionResults, executor, summary); + final var resultingState = innerExecuteTasks(previousClusterState, executionResults, executor, summary, threadContext); if (previousClusterState != resultingState && previousClusterState.nodes().isLocalNodeElectedMaster() && (resultingState.nodes().isLocalNodeElectedMaster() == false)) { @@ -972,28 +1040,49 @@ private static ClusterState innerExecuteTasks( ClusterState previousClusterState, List> executionResults, ClusterStateTaskExecutor executor, - BatchSummary summary + BatchSummary summary, + ThreadContext threadContext ) { final var taskContexts = castTaskContexts(executionResults); - try { - return executor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(previousClusterState, taskContexts)); - } catch (Exception e) { - logger.trace( - () -> format( - "failed to execute cluster state update (on version: [%s], uuid: [%s]) for [%s]\n%s%s%s", - previousClusterState.version(), - previousClusterState.stateUUID(), - summary, - previousClusterState.nodes(), - previousClusterState.routingTable(), - previousClusterState.getRoutingNodes() - ), - e - ); - for (final var executionResult : executionResults) { - executionResult.onBatchFailure(e); + try (var ignored = threadContext.newStoredContext(false)) { + // if the executor leaks a response header then this will cause a test failure, but we also store the context here to be sure + // to avoid leaking headers in production that were missed by tests + + try { + return executor.execute( + new ClusterStateTaskExecutor.BatchExecutionContext<>( + previousClusterState, + taskContexts, + () -> threadContext.newStoredContext(false) + ) + ); + } catch (Exception e) { + logger.trace( + () -> format( + "failed to execute cluster state update (on version: [%s], uuid: [%s]) for [%s]\n%s%s%s", + previousClusterState.version(), + previousClusterState.stateUUID(), + summary, + previousClusterState.nodes(), + previousClusterState.routingTable(), + previousClusterState.getRoutingNodes() + ), + e + ); + for (final var executionResult : executionResults) { + executionResult.onBatchFailure(e); + } + return previousClusterState; + } finally { + assert threadContext.getResponseHeaders().isEmpty() + : """ + Batched task executors must marshal response headers to the appropriate task context (e.g. using \ + TaskContext#captureResponseHeaders) or suppress them (e.g. 
using BatchExecutionContext#dropHeadersContext) and \ + must not leak them to the master service, but executor [""" + + executor + + "] leaked the following headers: " + + threadContext.getResponseHeaders(); } - return previousClusterState; } } diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java index 5ebf5715dccf4..91aeab6799da2 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java @@ -173,7 +173,9 @@ static class Executor implements ClusterStateTaskExecutor batchExecutionContext) throws Exception { ClusterState updatedState = batchExecutionContext.initialState(); for (TaskContext taskContext : batchExecutionContext.taskContexts()) { - updatedState = taskContext.getTask().execute(updatedState); + try (var ignored = taskContext.captureResponseHeaders()) { + updatedState = taskContext.getTask().execute(updatedState); + } taskContext.success(() -> {}); } return updatedState; diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 121553a024435..5f66dbb2daa07 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -115,7 +115,9 @@ public class IngestService implements ClusterStateApplier, ReportingService task.listener.onResponse(AcknowledgedResponse.TRUE)); } catch (Exception e) { taskContext.onFailure(e); diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java index 55c1004f68206..4bd7b7fb44f66 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java @@ -27,7 +27,9 @@ public ClusterState execute(BatchExecutionContext batchE var updatedState = batchExecutionContext.initialState(); for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); - updatedState = task.execute(updatedState); + try (var ignored = taskContext.captureResponseHeaders()) { + updatedState = task.execute(updatedState); + } taskContext.success(() -> task.listener().onResponse(ActionResponse.Empty.INSTANCE)); } return updatedState; diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java index bf840df027837..7715623409974 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskExecutor.java @@ -30,7 +30,9 @@ public record ReservedStateUpdateTaskExecutor(RerouteService rerouteService) imp public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { var updatedState = batchExecutionContext.initialState(); for (final var taskContext : batchExecutionContext.taskContexts()) { - updatedState = taskContext.getTask().execute(updatedState); + try (var ignored = taskContext.captureResponseHeaders()) { + updatedState = 
taskContext.getTask().execute(updatedState); + } taskContext.success(() -> taskContext.getTask().listener().onResponse(ActionResponse.Empty.INSTANCE)); } return updatedState; diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 03ec1b692efbd..cd7aa8d8918ca 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -643,6 +643,7 @@ public void testClusterStateBatchedUpdates() throws BrokenBarrierException, Inte AtomicInteger submittedTasks = new AtomicInteger(); AtomicInteger processedStates = new AtomicInteger(); SetOnce processedStatesLatch = new SetOnce<>(); + final String responseHeaderName = randomAlphaOfLength(10); class Task implements ClusterStateTaskListener { private final AtomicBoolean executed = new AtomicBoolean(); @@ -653,6 +654,7 @@ class Task implements ClusterStateTaskListener { } public void execute() { + threadPool.getThreadContext().addResponseHeader(responseHeaderName, toString()); if (executed.compareAndSet(false, true) == false) { throw new AssertionError("Task [" + id + "] should only be executed once"); } else { @@ -713,7 +715,19 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) { } for (final var taskContext : batchExecutionContext.taskContexts()) { - taskContext.getTask().execute(); + if (randomBoolean()) { + try (var ignored = taskContext.captureResponseHeaders()) { + threadPool.getThreadContext().addResponseHeader(responseHeaderName, randomAlphaOfLength(10)); + } + } + try (var ignored = taskContext.captureResponseHeaders()) { + taskContext.getTask().execute(); + } + if (randomBoolean()) { + try (var ignored = taskContext.captureResponseHeaders()) { + threadPool.getThreadContext().addResponseHeader(responseHeaderName, randomAlphaOfLength(10)); + } + } } executed.addAndGet(batchExecutionContext.taskContexts().size()); @@ -730,6 +744,10 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) { for (final var taskContext : batchExecutionContext.taskContexts()) { taskContext.success(() -> { + assertThat( + threadPool.getThreadContext().getResponseHeaders().get(responseHeaderName), + hasItem(taskContext.getTask().toString()) + ); processedStates.incrementAndGet(); processedStatesLatch.get().countDown(); }); @@ -902,8 +920,10 @@ public void testTaskNotificationAfterPublication() throws Exception { class Task implements ClusterStateTaskListener { final ActionListener publishListener; + final String responseHeaderValue; - Task(ActionListener publishListener) { + Task(String responseHeaderValue, ActionListener publishListener) { + this.responseHeaderValue = responseHeaderValue; this.publishListener = publishListener; } @@ -921,11 +941,16 @@ public void onFailure(Exception e) { final String testContextHeaderName = "test-context-header"; final ThreadContext threadContext = threadPool.getThreadContext(); + final var testResponseHeaderName = "test-response-header"; + final var executor = new ClusterStateTaskExecutor() { @Override @SuppressForbidden(reason = "consuming published cluster state for legacy reasons") public ClusterState execute(BatchExecutionContext batchExecutionContext) { for (final var taskContext : batchExecutionContext.taskContexts()) { + try (var ignored = taskContext.captureResponseHeaders()) { + threadPool.getThreadContext().addResponseHeader(testResponseHeaderName, 
taskContext.getTask().responseHeaderValue); + } taskContext.success(taskContext.getTask().publishListener::onResponse); } return ClusterState.builder(batchExecutionContext.initialState()).build(); @@ -967,11 +992,13 @@ public void onFailure(Exception e) { for (int i = 0; i < toSubmit; i++) { try (ThreadContext.StoredContext ignored = threadContext.newStoredContext(false)) { final var testContextHeaderValue = randomAlphaOfLength(10); + final var testResponseHeaderValue = randomAlphaOfLength(10); threadContext.putHeader(testContextHeaderName, testContextHeaderValue); - final var task = new Task(new ActionListener<>() { + final var task = new Task(testResponseHeaderValue, new ActionListener<>() { @Override public void onResponse(ClusterState clusterState) { assertEquals(testContextHeaderValue, threadContext.getHeader(testContextHeaderName)); + assertEquals(List.of(testResponseHeaderValue), threadContext.getResponseHeaders().get(testResponseHeaderName)); assertSame(publishedState.get(), clusterState); publishSuccessCountdown.countDown(); } @@ -1007,8 +1034,9 @@ public void onFailure(Exception e) { for (int i = 0; i < toSubmit; i++) { try (ThreadContext.StoredContext ignored = threadContext.newStoredContext(false)) { final String testContextHeaderValue = randomAlphaOfLength(10); + final String testResponseHeaderValue = randomAlphaOfLength(10); threadContext.putHeader(testContextHeaderName, testContextHeaderValue); - final var task = new Task(new ActionListener<>() { + final var task = new Task(testResponseHeaderValue, new ActionListener<>() { @Override public void onResponse(ClusterState clusterState) { throw new AssertionError("should not succeed"); @@ -1017,6 +1045,7 @@ public void onResponse(ClusterState clusterState) { @Override public void onFailure(Exception e) { assertEquals(testContextHeaderValue, threadContext.getHeader(testContextHeaderName)); + assertEquals(List.of(testResponseHeaderValue), threadContext.getResponseHeaders().get(testResponseHeaderName)); assertThat(e, instanceOf(FailedToCommitClusterStateException.class)); assertThat(e.getMessage(), equalTo(exceptionMessage)); publishFailureCountdown.countDown(); @@ -1322,6 +1351,8 @@ public void testAcking() throws InterruptedException { ) ) { + final var responseHeaderName = "test-response-header"; + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).masterNodeId(node1.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) @@ -1401,7 +1432,17 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) ClusterStateTaskConfig.build(Priority.NORMAL), batchExecutionContext -> { for (final var taskContext : batchExecutionContext.taskContexts()) { - taskContext.success(latch::countDown, taskContext.getTask()); + final var responseHeaderValue = randomAlphaOfLength(10); + try (var ignored = taskContext.captureResponseHeaders()) { + threadPool.getThreadContext().addResponseHeader(responseHeaderName, responseHeaderValue); + } + taskContext.success(() -> { + assertThat( + threadPool.getThreadContext().getResponseHeaders().get(responseHeaderName), + equalTo(List.of(responseHeaderValue)) + ); + latch.countDown(); + }, taskContext.getTask()); } return randomBoolean() ? 
batchExecutionContext.initialState() @@ -1496,6 +1537,66 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) assertTrue(latch.await(10, TimeUnit.SECONDS)); } + // check that exception from acking is passed to listener + { + final CountDownLatch latch = new CountDownLatch(1); + + publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> { + publishListener.onResponse(null); + ackListener.onCommit(TimeValue.ZERO); + ackListener.onNodeAck(node1, null); + ackListener.onNodeAck(node2, new ElasticsearchException("simulated")); + ackListener.onNodeAck(node3, null); + }); + + class Task implements ClusterStateTaskListener { + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + fail(); + } + } + + masterService.submitStateUpdateTask( + "node-ack-fail-test", + new Task(), + ClusterStateTaskConfig.build(Priority.NORMAL), + batchExecutionContext -> { + for (final var taskContext : batchExecutionContext.taskContexts()) { + final var responseHeaderValue = randomAlphaOfLength(10); + try (var ignored = taskContext.captureResponseHeaders()) { + threadPool.getThreadContext().addResponseHeader(responseHeaderName, responseHeaderValue); + } + taskContext.success(new LatchAckListener(latch) { + @Override + public void onAllNodesAcked() { + fail(); + } + + @Override + public void onAckFailure(Exception e) { + assertThat( + threadPool.getThreadContext().getResponseHeaders().get(responseHeaderName), + equalTo(List.of(responseHeaderValue)) + ); + assertThat(e, instanceOf(ElasticsearchException.class)); + assertThat(e.getMessage(), equalTo("simulated")); + latch.countDown(); + } + }); + } + return ClusterState.builder(batchExecutionContext.initialState()).build(); + } + ); + + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + // check that we don't time out before even committing the cluster state { final CountDownLatch latch = new CountDownLatch(1); @@ -1554,11 +1655,14 @@ public void onAckTimeout() { ackListener.onNodeAck(node3, null); }); + final var responseHeaderValue = randomAlphaOfLength(10); + masterService.submitUnbatchedStateUpdateTask( "test2", new AckedClusterStateUpdateTask(ackedRequest(ackTimeout, null), null) { @Override public ClusterState execute(ClusterState currentState) { + threadPool.getThreadContext().addResponseHeader(responseHeaderName, responseHeaderValue); return ClusterState.builder(currentState).build(); } @@ -1580,6 +1684,10 @@ public void onFailure(Exception e) { @Override public void onAckTimeout() { + assertThat( + threadPool.getThreadContext().getResponseHeaders().get(responseHeaderName), + equalTo(List.of(responseHeaderValue)) + ); latch.countDown(); } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index e361407671e4f..2734e693e5773 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Releasable; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; 
import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -177,9 +178,16 @@ public void success(Consumer publishedStateConsumer, ClusterStateA @Override public void onFailure(Exception failure) {} + + @Override + public Releasable captureResponseHeaders() { + return null; + } }; - ClusterState newState = taskExecutor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(taskContext))); + ClusterState newState = taskExecutor.execute( + new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(taskContext), () -> null) + ); assertEquals(state, newState); assertTrue(successCalled.get()); verify(task, times(1)).execute(any()); @@ -231,11 +239,18 @@ public void success(Consumer publishedStateConsumer, ClusterStateA @Override public void onFailure(Exception failure) {} + + @Override + public Releasable captureResponseHeaders() { + return null; + } }; ReservedStateErrorTaskExecutor executor = new ReservedStateErrorTaskExecutor(); - ClusterState newState = executor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(taskContext))); + ClusterState newState = executor.execute( + new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(taskContext), () -> null) + ); verify(task, times(1)).execute(any()); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java b/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java index e03648114b367..0ecd0ac27c0f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/service/ClusterStateTaskExecutorUtils.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.Releasable; import java.util.function.Consumer; import java.util.stream.StreamSupport; @@ -64,7 +65,9 @@ public static ClusterState executeHandlingR final var taskContexts = StreamSupport.stream(tasks.spliterator(), false).>map( TestTaskContext::new ).toList(); - final var resultingState = executor.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(originalState, taskContexts)); + final var resultingState = executor.execute( + new ClusterStateTaskExecutor.BatchExecutionContext<>(originalState, taskContexts, () -> null) + ); assertNotNull(resultingState); for (final var taskContext : taskContexts) { final var testTaskContext = (TestTaskContext) taskContext; @@ -146,6 +149,11 @@ public void success(Consumer publishedStateListener) { this.succeeded = true; } + @Override + public Releasable captureResponseHeaders() { + return () -> {}; + } + @Override public String toString() { return "TestTaskContext[" + task + "]"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index 59637fde08f6a..5c81fde14bd26 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -133,7 +133,9 @@ public ClusterState execute(BatchExecutionContext batchEx final LicensesMetadata originalLicensesMetadata = 
initialState.metadata().custom(LicensesMetadata.TYPE); var currentLicensesMetadata = originalLicensesMetadata; for (final var taskContext : batchExecutionContext.taskContexts()) { - currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, initialState.nodes(), taskContext); + try (var ignored = taskContext.captureResponseHeaders()) { + currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, initialState.nodes(), taskContext); + } } if (currentLicensesMetadata == originalLicensesMetadata) { return initialState; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 4579218677d2c..bb4bb6adcb073 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -120,7 +120,9 @@ public ClusterState execute(BatchExecutionContext batchEx final LicensesMetadata originalLicensesMetadata = initialState.metadata().custom(LicensesMetadata.TYPE); var currentLicensesMetadata = originalLicensesMetadata; for (final var taskContext : batchExecutionContext.taskContexts()) { - currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, initialState.nodes(), taskContext); + try (var ignored = taskContext.captureResponseHeaders()) { + currentLicensesMetadata = taskContext.getTask().execute(currentLicensesMetadata, initialState.nodes(), taskContext); + } } if (currentLicensesMetadata == originalLicensesMetadata) { return initialState; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java index 6a436f37749c2..29439d7cac47d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java @@ -212,7 +212,7 @@ public void testStartBasicStartsNewLicenseIfFieldsDifferent() throws Exception { ); ClusterState updatedState = taskExecutorCaptor.getValue() - .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(oldState, List.of(taskContext))); + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(oldState, List.of(taskContext), () -> null)); // Pass updated state to listener to trigger onResponse call to wrapped `future` listenerCaptor.getValue().run(); assertion.accept(future); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index 0ae0701e8bd44..d450cbd7f37ad 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -65,7 +65,9 @@ public ClusterState execute(BatchExecutionContext publishedStateConsumer, ClusterStateA public void onFailure(Exception failure) { fail("Shouldn't fail here"); } + + @Override + public Releasable captureResponseHeaders() { + return null; + } }; - task.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(context))); + task.execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(context), () -> null)); return null; }).when(clusterService).submitStateUpdateTask(anyString(), any(), any(), 
any()); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java index 0a67a6f500521..c6f95df65d669 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java @@ -96,7 +96,9 @@ public class TransportRollupAction extends AcknowledgedTransportMasterNodeAction for (final var taskContext : batchExecutionContext.taskContexts()) { try { final var task = taskContext.getTask(); - state = task.execute(state); + try (var ignored = taskContext.captureResponseHeaders()) { + state = task.execute(state); + } taskContext.success(() -> task.listener.onResponse(AcknowledgedResponse.TRUE)); } catch (Exception e) { taskContext.onFailure(e); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java index 68de9c2a51598..40c18618cab9b 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java @@ -84,7 +84,7 @@ public ClusterState execute(BatchExecutionContext batchE boolean changed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { var request = taskContext.getTask().request(); - try { + try (var ignored = taskContext.captureResponseHeaders()) { changed |= deleteShutdownNodeState(shutdownMetadata, request); } catch (Exception e) { taskContext.onFailure(e); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java index 812f020131d43..590447459fd79 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java @@ -123,7 +123,7 @@ public ClusterState execute(BatchExecutionContext batchExec boolean changed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { var request = taskContext.getTask().request(); - try { + try (var ignored = taskContext.captureResponseHeaders()) { changed |= putShutdownNodeState(shutdownMetadata, nodeExistsPredicate, request); } catch (Exception e) { taskContext.onFailure(e); diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java index 2df7b2ead0849..a29b5784f618d 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java @@ -78,7 +78,7 @@ public void testNoop() throws Exception { verify(clusterService).submitStateUpdateTask(any(), updateTask.capture(), taskConfig.capture(), taskExecutor.capture()); when(taskContext.getTask()).thenReturn(updateTask.getValue()); ClusterState gotState = 
taskExecutor.getValue() - .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(ClusterState.EMPTY_STATE, List.of(taskContext))); + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(ClusterState.EMPTY_STATE, List.of(taskContext), () -> null)); assertThat(gotState, sameInstance(ClusterState.EMPTY_STATE)); } } diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java index 39a70853a7405..dd4258d37840f 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeActionTests.java @@ -76,7 +76,7 @@ public void testNoop() throws Exception { verify(clusterService).submitStateUpdateTask(any(), updateTask.capture(), taskConfig.capture(), taskExecutor.capture()); when(taskContext.getTask()).thenReturn(updateTask.getValue()); ClusterState stableState = taskExecutor.getValue() - .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(ClusterState.EMPTY_STATE, List.of(taskContext))); + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(ClusterState.EMPTY_STATE, List.of(taskContext), () -> null)); // run the request again, there should be no call to submit an update task clearInvocations(clusterService); @@ -88,7 +88,7 @@ public void testNoop() throws Exception { verify(clusterService).submitStateUpdateTask(any(), updateTask.capture(), taskConfig.capture(), taskExecutor.capture()); when(taskContext.getTask()).thenReturn(updateTask.getValue()); ClusterState gotState = taskExecutor.getValue() - .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(stableState, List.of(taskContext))); + .execute(new ClusterStateTaskExecutor.BatchExecutionContext<>(stableState, List.of(taskContext), () -> null)); assertThat(gotState, sameInstance(stableState)); } } From 8d37d4842650b27779f7619c2729b583ac509597 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 15 Aug 2022 10:53:41 +0100 Subject: [PATCH 197/265] Check circuit breaker before sending join request (#89318) Adds a simple preflight check to `JoinHelper#sendJoinRequest` to avoid sending a join request if it looks like the `inflight_requests` circuit breaker is going to trip on the join validation message. 
Closes #85003 --- docs/changelog/89318.yaml | 6 ++ .../discovery/DiscoveryDisruptionIT.java | 57 +++++++++++++++++++ .../cluster/coordination/Coordinator.java | 7 ++- .../cluster/coordination/JoinHelper.java | 43 ++++++++++++-- .../discovery/DiscoveryModule.java | 7 ++- .../java/org/elasticsearch/node/Node.java | 3 +- .../cluster/coordination/JoinHelperTests.java | 7 ++- .../cluster/coordination/NodeJoinTests.java | 4 +- .../discovery/DiscoveryModuleTests.java | 4 +- .../snapshots/SnapshotResiliencyTests.java | 3 +- .../AbstractCoordinatorTestCase.java | 4 +- 11 files changed, 128 insertions(+), 17 deletions(-) create mode 100644 docs/changelog/89318.yaml diff --git a/docs/changelog/89318.yaml b/docs/changelog/89318.yaml new file mode 100644 index 0000000000000..86cbde6e5b548 --- /dev/null +++ b/docs/changelog/89318.yaml @@ -0,0 +1,6 @@ +pr: 89318 +summary: Check circuit breaker before sending join request +area: Cluster Coordination +type: bug +issues: + - 85003 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 9694a11eb4d6e..e85c70775fb5c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -16,7 +16,10 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; @@ -257,4 +260,58 @@ public void testJoinWaitsForClusterApplier() throws Exception { logger.info("--> waiting for cluster to heal"); ensureStableCluster(3); } + + public void testJoinWaitsForCircuitBreaker() throws InterruptedException { + startCluster(3); + + final var masterName = internalCluster().getMasterName(); + final var victimName = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames())); + logger.info("--> master [{}], victim [{}]", masterName, victimName); + + // fill up the circuit breaker to breaking point + final var circuitBreaker = internalCluster().getInstance(CircuitBreakerService.class, victimName) + .getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); + long allocationSize = 1; + while (true) { + try { + circuitBreaker.addEstimateBytesAndMaybeBreak(allocationSize, "test"); + } catch (CircuitBreakingException e) { + circuitBreaker.addWithoutBreaking(allocationSize); + break; + } + allocationSize <<= 1; + assert 0 <= allocationSize; + } + + // drop the victim from the cluster with a network disruption + final var masterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, masterName); + masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, victimName)); + logger.info("--> waiting for victim's departure"); + ensureStableCluster(2, masterName); + + // verify that the victim sends no joins while the circuit breaker is breaking + final var victimTransportService = (MockTransportService) 
internalCluster().getInstance(TransportService.class, victimName); + victimTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + assertNotEquals(action, JoinHelper.JOIN_ACTION_NAME); + connection.sendRequest(requestId, action, request, options); + }); + + // fix the network disruption + logger.info("--> removing network disruption"); + masterTransportService.clearAllRules(); + ensureStableCluster(2, masterName); + + // permit joins again + victimTransportService.addSendBehavior(null); + + // release the breaker + logger.info("--> releasing allocations from circuit breaker"); + while (0 < allocationSize) { + circuitBreaker.addWithoutBreaking(-allocationSize); + allocationSize >>= 1; + } + + logger.info("--> waiting for cluster to heal"); + ensureStableCluster(3); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index f583bf090d59f..6c024ae879ec0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -61,6 +61,7 @@ import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.SeedHostsResolver; import org.elasticsearch.discovery.TransportAddressConnector; +import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.threadpool.Scheduler; @@ -197,7 +198,8 @@ public Coordinator( Random random, RerouteService rerouteService, ElectionStrategy electionStrategy, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + CircuitBreakerService circuitBreakerService ) { this.settings = settings; this.transportService = transportService; @@ -217,7 +219,8 @@ public Coordinator( this::joinLeaderInTerm, rerouteService, nodeHealthService, - joinReasonService + joinReasonService, + circuitBreakerService ); this.joinValidationService = new JoinValidationService( settings, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index 937eded6f3a5e..48d6ba553cbbe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.cluster.ClusterStateTaskConfig; @@ -21,11 +22,14 @@ import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import 
org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -67,6 +71,7 @@ public class JoinHelper { private final LongSupplier currentTermSupplier; private final NodeHealthService nodeHealthService; private final JoinReasonService joinReasonService; + private final CircuitBreakerService circuitBreakerService; private final Map, PendingJoinInfo> pendingOutgoingJoins = ConcurrentCollections.newConcurrentMap(); private final AtomicReference lastFailedJoinAttempt = new AtomicReference<>(); @@ -82,11 +87,13 @@ public class JoinHelper { Function joinLeaderInTerm, RerouteService rerouteService, NodeHealthService nodeHealthService, - JoinReasonService joinReasonService + JoinReasonService joinReasonService, + CircuitBreakerService circuitBreakerService ) { this.masterService = masterService; this.clusterApplier = clusterApplier; this.transportService = transportService; + this.circuitBreakerService = circuitBreakerService; this.joinTaskExecutor = new JoinTaskExecutor(allocationService, rerouteService); this.currentTermSupplier = currentTermSupplier; this.nodeHealthService = nodeHealthService; @@ -166,10 +173,10 @@ private void unregisterAndReleaseConnection(DiscoveryNode destination, Releasabl static class FailedJoinAttempt { private final DiscoveryNode destination; private final JoinRequest joinRequest; - private final TransportException exception; + private final ElasticsearchException exception; private final long timestamp; - FailedJoinAttempt(DiscoveryNode destination, JoinRequest joinRequest, TransportException exception) { + FailedJoinAttempt(DiscoveryNode destination, JoinRequest joinRequest, ElasticsearchException exception) { this.destination = destination; this.joinRequest = joinRequest; this.exception = exception; @@ -180,9 +187,10 @@ void logNow() { logger.log(getLogLevel(exception), () -> format("failed to join %s with %s", destination, joinRequest), exception); } - static Level getLogLevel(TransportException e) { + static Level getLogLevel(ElasticsearchException e) { Throwable cause = e.unwrapCause(); if (cause instanceof CoordinationStateRejectedException + || cause instanceof CircuitBreakingException || (cause instanceof Exception causeException && MasterService.isPublishFailureException(causeException))) { return Level.DEBUG; } @@ -221,6 +229,29 @@ public void sendJoinRequest(DiscoveryNode destination, long term, Optional final Tuple dedupKey = Tuple.tuple(destination, joinRequest); final var pendingJoinInfo = new PendingJoinInfo(transportService.getThreadPool().relativeTimeInMillis()); if (pendingOutgoingJoins.putIfAbsent(dedupKey, pendingJoinInfo) == null) { + + // If this node is under excessive heap pressure then the state it receives for join validation will trip a circuit breaker and + // fail the join attempt, resulting in retrying in a loop which makes the master just send a constant stream of cluster states + // to this node. 
We try and keep the problem local to this node by checking that we can at least allocate one byte: + final var breaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); + try { + breaker.addEstimateBytesAndMaybeBreak(1L, "pre-flight join request"); + } catch (Exception e) { + pendingJoinInfo.message = PENDING_JOIN_FAILED; + pendingOutgoingJoins.remove(dedupKey); + if (e instanceof ElasticsearchException elasticsearchException) { + final var attempt = new FailedJoinAttempt(destination, joinRequest, elasticsearchException); + attempt.logNow(); + lastFailedJoinAttempt.set(attempt); + assert elasticsearchException instanceof CircuitBreakingException : e; // others shouldn't happen, handle them anyway + } else { + logger.error("join failed during pre-flight circuit breaker check", e); + assert false : e; // shouldn't happen, handle it anyway + } + return; + } + breaker.addWithoutBreaking(-1L); + logger.debug("attempting to join {} with {}", destination, joinRequest); pendingJoinInfo.message = PENDING_JOIN_CONNECTING; // Typically we're already connected to the destination at this point, the PeerFinder holds a reference to this connection to @@ -279,7 +310,7 @@ public void onFailure(Exception e) { private void cleanUpOnFailure(TransportException exp) { pendingJoinInfo.message = PENDING_JOIN_FAILED; pendingOutgoingJoins.remove(dedupKey); - FailedJoinAttempt attempt = new FailedJoinAttempt(destination, joinRequest, exp); + final var attempt = new FailedJoinAttempt(destination, joinRequest, exp); attempt.logNow(); lastFailedJoinAttempt.set(attempt); unregisterAndReleaseConnection(destination, connectionReference); @@ -292,7 +323,7 @@ private void cleanUpOnFailure(TransportException exp) { public void onFailure(Exception e) { pendingJoinInfo.message = PENDING_JOIN_CONNECT_FAILED; pendingOutgoingJoins.remove(dedupKey); - FailedJoinAttempt attempt = new FailedJoinAttempt( + final var attempt = new FailedJoinAttempt( destination, joinRequest, new ConnectTransportException(destination, "failed to acquire connection", e) diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index c58e8e995b7bb..181c30136f19a 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.gateway.GatewayMetaState; +import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.transport.TransportService; @@ -101,7 +102,8 @@ public DiscoveryModule( Path configFile, GatewayMetaState gatewayMetaState, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + CircuitBreakerService circuitBreakerService ) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); @@ -191,7 +193,8 @@ public DiscoveryModule( new Random(Randomness.get().nextLong()), rerouteService, electionStrategy, - nodeHealthService + nodeHealthService, + circuitBreakerService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java 
b/server/src/main/java/org/elasticsearch/node/Node.java index ff3d28bdce663..f89e3969e2e2a 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -857,7 +857,8 @@ protected Node( environment.configFile(), gatewayMetaState, rerouteService, - fsHealthService + fsHealthService, + circuitBreakerService ); this.nodeService = new NodeService( settings, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index 7db99eec545e2..435fbc46f94b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; @@ -74,7 +75,8 @@ public void testJoinDeduplication() { startJoinRequest -> { throw new AssertionError(); }, (s, p, r) -> {}, () -> new StatusInfo(HEALTHY, "info"), - new JoinReasonService(() -> 0L) + new JoinReasonService(() -> 0L), + new NoneCircuitBreakerService() ); transportService.start(); @@ -229,7 +231,8 @@ public void testJoinFailureOnUnhealthyNodes() { startJoinRequest -> { throw new AssertionError(); }, (s, p, r) -> {}, nodeHealthServiceStatus::get, - new JoinReasonService(() -> 0L) + new JoinReasonService(() -> 0L), + new NoneCircuitBreakerService() ); transportService.start(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java index 24d9d96ef9468..01602deb53d3a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.node.Node; @@ -224,7 +225,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req random, (s, p, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE, - nodeHealthService + nodeHealthService, + new NoneCircuitBreakerService() ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index c381333af0f9a..3af4cffda8c82 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; import org.elasticsearch.gateway.GatewayMetaState; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.plugins.DiscoveryPlugin; 
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -91,7 +92,8 @@ private DiscoveryModule newModule(Settings settings, List plugi createTempDir().toAbsolutePath(), gatewayMetaState, mock(RerouteService.class), - null + null, + new NoneCircuitBreakerService() ); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 4448a26b5f144..838e383d2457e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -2172,7 +2172,8 @@ public void start(ClusterState initialState) { random(), rerouteService, ElectionStrategy.DEFAULT_INSTANCE, - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + new NoneCircuitBreakerService() ); masterService.setClusterStatePublisher(coordinator); coordinator.start(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index 49664761f7897..3f820e9772518 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -68,6 +68,7 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MockGatewayMetaState; import org.elasticsearch.gateway.PersistedClusterStateService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.test.ESTestCase; @@ -1250,7 +1251,8 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { Randomness.get(), (s, p, r) -> {}, getElectionStrategy(), - nodeHealthService + nodeHealthService, + new NoneCircuitBreakerService() ); coordinationDiagnosticsService = new CoordinationDiagnosticsService( clusterService, From c4c1802fb10085b38d65f73c84997eb254455954 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Mon, 15 Aug 2022 12:09:47 +0200 Subject: [PATCH 198/265] Unify handling of custom Gradle User home in build tool tests (#89304) - keep guh separated from test project dir - unify folder handling as a side effect when keeping the project dir for debugging failed tests we do not copy the whole GUH which usually isn't providing any additional help for debugging those --- ...lDistributionDownloadPluginFuncTest.groovy | 2 +- .../internal/JdkDownloadPluginFuncTest.groovy | 8 ++--- .../DistributionDownloadPluginFuncTest.groovy | 6 ++-- .../gradle/TestClustersPluginFuncTest.groovy | 6 ++-- ...GradleTestPolicySetupPluginFuncTest.groovy | 4 +-- .../fixtures/AbstractGradleFuncTest.groovy | 31 ++++++++++--------- .../test/NormalizeOutputGradleRunner.java | 13 ++++---- 7 files changed, 35 insertions(+), 35 deletions(-) diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy index 8d1a038331dca..d43dbec5ef6b6 100644 --- 
a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy @@ -39,7 +39,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest """ when: - def result = gradleRunner("setupDistro", '-g', testProjectDir.newFolder('GUH').path).build() + def result = gradleRunner("setupDistro", '-g', gradleUserHome).build() then: result.task(":distribution:archives:${testArchiveProjectName}:buildExpanded").outcome == TaskOutcome.SUCCESS diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy index 8300318fbdc16..ec546508c677d 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy @@ -8,6 +8,7 @@ package org.elasticsearch.gradle.internal +import spock.lang.TempDir import spock.lang.Unroll import com.github.tomakehurst.wiremock.WireMockServer @@ -126,7 +127,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { when: def result = WiremockFixture.withWireMock(mockRepoUrl, mockedContent) { server -> buildFile << repositoryMockSetup(server, jdkVendor, jdkVersion) - gradleRunner('getJdk', '-i', '-g', testProjectDir.newFolder().toString()).build() + gradleRunner('getJdk', '-i', '-g', gradleUserHome).build() } then: @@ -179,13 +180,12 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { def result = WiremockFixture.withWireMock(mockRepoUrl, mockedContent) { server -> buildFile << repositoryMockSetup(server, VENDOR_ADOPTIUM, ADOPT_JDK_VERSION) - def commonGradleUserHome = testProjectDir.newFolder().toString() // initial run - def firstResult = gradleRunner('clean', 'getJdk', '-i', '--warning-mode', 'all', '-g', commonGradleUserHome).build() + def firstResult = gradleRunner('clean', 'getJdk', '-i', '--warning-mode', 'all', '-g', gradleUserHome).build() // assert the output of an executed transform is shown assertOutputContains(firstResult.output, "Unpacking $expectedArchiveName using $transformType") // run against up-to-date transformations - gradleRunner('clean', 'getJdk', '-i', '--warning-mode', 'all', '-g', commonGradleUserHome).build() + gradleRunner('clean', 'getJdk', '-i', '--warning-mode', 'all', '-g', gradleUserHome).build() } then: diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy index 1e461665d7139..228223897ede9 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/DistributionDownloadPluginFuncTest.groovy @@ -49,8 +49,7 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { """ when: - def guh = new File(testProjectDir.getRoot(), "gradle-user-home").absolutePath; - def runner = gradleRunner('clean', 'setupDistro', '-i', '-g', guh) + def runner = gradleRunner('clean', 'setupDistro', '-i', '-g', gradleUserHome) def unpackingMessage = "Unpacking 
elasticsearch-${version}-linux-${Architecture.current().classifier}.tar.gz " + "using SymbolicLinkPreservingUntarTransform" def result = withMockedDistributionDownload(version, platform, runner) { @@ -92,8 +91,7 @@ class DistributionDownloadPluginFuncTest extends AbstractGradleFuncTest { """ when: - def customGradleUserHome = testProjectDir.newFolder().absolutePath; - def runner = gradleRunner('setupDistro', '-i', '-g', customGradleUserHome) + def runner = gradleRunner('setupDistro', '-i', '-g', gradleUserHome) def result = withMockedDistributionDownload(version, platform, runner) { build() } diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy index 5287d4a932587..6b662b8165034 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy @@ -103,7 +103,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { """ when: - def runner = gradleRunner("myTask", '-i', '-g', 'guh') + def runner = gradleRunner("myTask", '-i', '-g', gradleUserHome) def runningClosure = { GradleRunner r -> r.build() } withMockedDistributionDownload(runner, runningClosure) def result = inputProperty == "distributionClasspath" ? @@ -155,12 +155,12 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { """ when: - withMockedDistributionDownload(gradleRunner("myTask", '-g', 'guh')) { + withMockedDistributionDownload(gradleRunner("myTask", '-g', gradleUserHome)) { build() } fileChange.delegate = this fileChange.call(this) - def result = withMockedDistributionDownload(gradleRunner("myTask", '-i', '-g', 'guh')) { + def result = withMockedDistributionDownload(gradleRunner("myTask", '-i', '-g', gradleUserHome)) { build() } diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/GradleTestPolicySetupPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/GradleTestPolicySetupPluginFuncTest.groovy index 6d72dc0a611e5..2353b7e2f7d34 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/GradleTestPolicySetupPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/GradleTestPolicySetupPluginFuncTest.groovy @@ -50,13 +50,13 @@ class GradleTestPolicySetupPluginFuncTest extends AbstractGradleFuncTest { """ when: - def result = gradleRunner('test', '-g', "guh1").build() + def result = gradleRunner('test', '-g', gradleUserHome).build() then: result.task(":test").outcome == TaskOutcome.SUCCESS when: // changing gradle user home - result = gradleRunner('test', '-g', "guh2").build() + result = gradleRunner('test', '-g', gradleUserHome).build() then: // still up-to-date result.task(":test").outcome == TaskOutcome.UP_TO_DATE } diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 87bff62d0184e..1724d8176b563 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -18,6 +18,7 @@ import org.gradle.testkit.runner.GradleRunner import org.junit.Rule import org.junit.rules.TemporaryFolder import 
spock.lang.Specification +import spock.lang.TempDir import java.lang.management.ManagementFactory import java.util.jar.JarEntry @@ -30,6 +31,9 @@ abstract class AbstractGradleFuncTest extends Specification { @Rule TemporaryFolder testProjectDir = new TemporaryFolder() + @TempDir + File gradleUserHome + File settingsFile File buildFile File propertiesFile @@ -69,24 +73,23 @@ abstract class AbstractGradleFuncTest extends Specification { subProjectBuild } - GradleRunner gradleRunner(String... arguments) { + GradleRunner gradleRunner(Object... arguments) { return gradleRunner(testProjectDir.root, arguments) } - GradleRunner gradleRunner(File projectDir, String... arguments) { + GradleRunner gradleRunner(File projectDir, Object... arguments) { return new NormalizeOutputGradleRunner( - new ConfigurationCacheCompatibleAwareGradleRunner( - new InternalAwareGradleRunner( - GradleRunner.create() - .withDebug(ManagementFactory.getRuntimeMXBean().getInputArguments() - .toString().indexOf("-agentlib:jdwp") > 0 - ) - .withProjectDir(projectDir) - .withPluginClasspath() - .forwardOutput() - ), configurationCacheCompatible), - projectDir - ).withArguments(arguments) + new ConfigurationCacheCompatibleAwareGradleRunner( + new InternalAwareGradleRunner( + GradleRunner.create() + .withDebug(ManagementFactory.getRuntimeMXBean().getInputArguments() + .toString().indexOf("-agentlib:jdwp") > 0 + ) + .withProjectDir(projectDir) + .withPluginClasspath() + .forwardOutput() + ), configurationCacheCompatible), + ).withArguments(arguments.collect { it.toString() }) } def assertOutputContains(String givenOutput, String expected) { diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/NormalizeOutputGradleRunner.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/NormalizeOutputGradleRunner.java index 940d8277a5dba..0c535eb7e60fb 100644 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/NormalizeOutputGradleRunner.java +++ b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/NormalizeOutputGradleRunner.java @@ -27,9 +27,10 @@ public class NormalizeOutputGradleRunner extends GradleRunner { - public NormalizeOutputGradleRunner(GradleRunner delegate, File projectRootDir) { + private GradleRunner delegate; + + public NormalizeOutputGradleRunner(GradleRunner delegate) { this.delegate = delegate; - this.projectRootDir = projectRootDir; } @Override @@ -74,7 +75,8 @@ public List getArguments() { @Override public GradleRunner withArguments(List arguments) { - return delegate.withArguments(arguments); + delegate.withArguments(arguments); + return this; } @Override @@ -150,9 +152,6 @@ public BuildResult buildAndFail() throws InvalidRunnerConfigurationException, Un return new NormalizedBuildResult(delegate.buildAndFail()); } - private GradleRunner delegate; - private File projectRootDir; - private class NormalizedBuildResult implements BuildResult { private BuildResult delegate; private String normalizedString; @@ -164,7 +163,7 @@ private class NormalizedBuildResult implements BuildResult { @Override public String getOutput() { if (normalizedString == null) { - normalizedString = normalizeString(delegate.getOutput(), projectRootDir); + normalizedString = normalizeString(delegate.getOutput(), getProjectDir()); } return normalizedString; } From 9b24b418a178b244a69d49efaa7cdc166e69460e Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 15 Aug 2022 13:14:36 +0100 Subject: [PATCH 199/265] Force rejection of unsupported bulk 
actions in v9 (#89339) Adds a mention of `Version.V_7_17_0` so that we don't forget to remove this deprecated behaviour in the next major version. Relates #78876 --- .../org/elasticsearch/action/bulk/BulkRequestParser.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index e2c58a21156bc..e7c01020493f5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.bulk; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -184,6 +185,7 @@ public void parse( } String action = parser.currentName(); if (SUPPORTED_ACTIONS.contains(action) == false) { + assert Version.CURRENT.major == Version.V_7_17_0.major + 1; deprecationLogger.compatibleCritical( STRICT_ACTION_PARSING_WARNING_KEY, "Unsupported action: [{}]. Supported values are [create], [delete], [index], and [update]. " @@ -424,6 +426,7 @@ private static void checkBulkActionIsProperlyClosed(XContentParser parser) throw try { token = parser.nextToken(); } catch (XContentEOFException ignore) { + assert Version.CURRENT.major == Version.V_7_17_0.major + 1; deprecationLogger.compatibleCritical( STRICT_ACTION_PARSING_WARNING_KEY, "A bulk action wasn't closed properly with the closing brace. Malformed objects are currently accepted but will be " @@ -432,6 +435,7 @@ private static void checkBulkActionIsProperlyClosed(XContentParser parser) throw return; } if (token != XContentParser.Token.END_OBJECT) { + assert Version.CURRENT.major == Version.V_7_17_0.major + 1; deprecationLogger.compatibleCritical( STRICT_ACTION_PARSING_WARNING_KEY, "A bulk action object contained multiple keys. Additional keys are currently ignored but will be rejected in a " @@ -440,6 +444,7 @@ private static void checkBulkActionIsProperlyClosed(XContentParser parser) throw return; } if (parser.nextToken() != null) { + assert Version.CURRENT.major == Version.V_7_17_0.major + 1; deprecationLogger.compatibleCritical( STRICT_ACTION_PARSING_WARNING_KEY, "A bulk action contained trailing data after the closing brace. This is currently ignored but will be rejected in a " From 10b804730d19d7052d1f3e6cfe9a8c55df9e862d Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Mon, 15 Aug 2022 09:43:12 -0400 Subject: [PATCH 200/265] Include runtime fields in total fields count (#89251) We have a check that enforces the total number of fields needs to be below a certain (configurable) threshold. Before runtime fields did not contribute to the count. 
This patch makes all runtime fields contribute to the count, runtime fields: - that were explicitly defined in mapping by a user - as well as runtime fields that were dynamically created by dynamic mappings Closes #88265 --- docs/changelog/89251.yaml | 6 ++ .../mapping/mapping-settings-limit.asciidoc | 3 +- .../index/mapper/DynamicMappingIT.java | 66 +++++++++++++++++++ .../index/mapper/DocumentParserContext.java | 8 ++- .../index/mapper/MappingLookup.java | 5 +- .../index/mapper/MapperServiceTests.java | 7 ++ 6 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/89251.yaml diff --git a/docs/changelog/89251.yaml b/docs/changelog/89251.yaml new file mode 100644 index 0000000000000..a3285d7b467a5 --- /dev/null +++ b/docs/changelog/89251.yaml @@ -0,0 +1,6 @@ +pr: 89251 +summary: Include runtime fields in total fields count +area: Mapping +type: bug +issues: + - 88265 diff --git a/docs/reference/mapping/mapping-settings-limit.asciidoc b/docs/reference/mapping/mapping-settings-limit.asciidoc index 0f94a376f4041..c499ca7675f2c 100644 --- a/docs/reference/mapping/mapping-settings-limit.asciidoc +++ b/docs/reference/mapping/mapping-settings-limit.asciidoc @@ -4,7 +4,8 @@ Use the following settings to limit the number of field mappings (created manual `index.mapping.total_fields.limit`:: The maximum number of fields in an index. Field and object mappings, as well as - field aliases count towards this limit. The default value is `1000`. + field aliases count towards this limit. Mapped runtime fields count towards this + limit as well. The default value is `1000`. + [IMPORTANT] ==== diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 4e2b3fb952164..d31475a172056 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Randomness; @@ -213,6 +214,71 @@ public void onFailure(Exception e) { } } + public void testTotalFieldsLimitWithRuntimeFields() { + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 4) + .build(); + + String mapping = """ + { + "dynamic":"runtime", + "runtime": { + "my_object.rfield1": { + "type": "keyword" + }, + "rfield2": { + "type": "keyword" + } + }, + "properties": { + "field3" : { + "type": "keyword" + } + } + } + """; + + client().admin().indices().prepareCreate("index1").setSettings(indexSettings).setMapping(mapping).get(); + ensureGreen("index1"); + + { + // introduction of a new object with 2 new sub-fields fails + final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index1") + .setId("1") + .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); + Exception exc = expectThrows(MapperParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); + assertThat(exc.getMessage(), 
Matchers.containsString("failed to parse")); + assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat( + exc.getCause().getMessage(), + Matchers.containsString("Limit of total fields [4] has been exceeded while adding new fields [2]") + ); + } + + { + // introduction of a new single field succeeds + client().prepareIndex("index1").setId("2").setSource("field3", "value3", "new_field4", 100).get(); + } + + { + // remove 2 runtime field mappings + assertAcked(client().admin().indices().preparePutMapping("index1").setSource(""" + { + "runtime": { + "my_object.rfield1": null, + "rfield2" : null + } + } + """, XContentType.JSON)); + + // introduction of a new object with 2 new sub-fields succeeds + client().prepareIndex("index1") + .setId("1") + .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); + } + } + public void testMappingVersionAfterDynamicMappingUpdate() throws Exception { createIndex("test"); final ClusterService clusterService = internalCluster().clusterService(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 9b6353b862b24..da4b8673c362b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -277,8 +277,14 @@ final ObjectMapper getDynamicObjectMapper(String name) { /** * Add a new runtime field dynamically created while parsing. + * We use the same set for both new indexed and new runtime fields, + * because for dynamic mappings, a new field can be either mapped + * as runtime or indexed, but never both. */ - public final void addDynamicRuntimeField(RuntimeField runtimeField) { + final void addDynamicRuntimeField(RuntimeField runtimeField) { + if (newFieldsSeen.add(runtimeField.name())) { + mappingLookup.checkFieldLimit(indexSettings().getMappingTotalFieldsLimit(), newFieldsSeen.size()); + } dynamicRuntimeFields.add(runtimeField); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 19f02f9ad0b84..a45fa7ff0e248 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -47,6 +47,7 @@ private CacheKey() {} /** Full field name to mapper */ private final Map fieldMappers; private final Map objectMappers; + private final int runtimeFieldMappersCount; private final NestedLookup nestedLookup; private final FieldTypeLookup fieldTypeLookup; private final FieldTypeLookup indexTimeLookup; // for index-time scripts, a lookup that does not include runtime fields @@ -180,6 +181,7 @@ private MappingLookup( // make all fields into compact+fast immutable maps this.fieldMappers = Map.copyOf(fieldMappers); this.objectMappers = Map.copyOf(objects); + this.runtimeFieldMappersCount = runtimeFields.size(); this.indexAnalyzersMap = Map.copyOf(indexAnalyzersMap); this.completionFields = Set.copyOf(completionFields); this.indexTimeScriptMappers = List.copyOf(indexTimeScriptMappers); @@ -262,7 +264,8 @@ private void checkFieldLimit(long limit) { } void checkFieldLimit(long limit, int additionalFieldsToAdd) { - if (fieldMappers.size() + objectMappers.size() + additionalFieldsToAdd - mapping.getSortedMetadataMappers().length > limit) { + if (fieldMappers.size() + 
objectMappers.size() + runtimeFieldMappersCount + additionalFieldsToAdd - mapping + .getSortedMetadataMappers().length > limit) { throw new IllegalArgumentException( "Limit of total fields [" + limit diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index f3771510d8da9..38d7567ce40e3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -70,6 +70,13 @@ public void testTotalFieldsLimit() throws Throwable { () -> merge(mapperService, mapping(b -> b.startObject("newfield").field("type", "long").endObject())) ); assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [" + totalFieldsLimit + "] has been exceeded")); + + // adding one more runtime field should trigger exception + e = expectThrows( + IllegalArgumentException.class, + () -> merge(mapperService, runtimeMapping(b -> b.startObject("newfield").field("type", "long").endObject())) + ); + assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [" + totalFieldsLimit + "] has been exceeded")); } private void createMappingSpecifyingNumberOfFields(XContentBuilder b, int numberOfFields) throws IOException { From 60016c8cf00048224ace8c2a88d78a27edfc6510 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Mon, 15 Aug 2022 10:06:49 -0400 Subject: [PATCH 201/265] convert raw url to hyperlink in javadoc (#89319) Co-authored-by: Elastic Machine --- .../search/aggregations/metrics/HyperLogLogPlusPlus.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index c6c7bda71eda6..d9e06c95fffc8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -23,8 +23,8 @@ /** * Hyperloglog++ counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen + * this paper and + * its appendix * * This implementation is different from the original implementation in that it uses a hash table instead of a sorted list for linear * counting. Although this requires more space and makes hyperloglog (which is less accurate) used sooner, this is also considerably faster. From 098f5181c46d0ab435bcb188d021ff4695bd9f46 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 16 Aug 2022 00:40:47 +1000 Subject: [PATCH 202/265] Double quote the env variable in curl command (#89279) The env variable can contain special chacacters. Without quoting it could mis-behave when used in the curl command of the setup docker container. 
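For illustration (the password value below is made up), the unquoted form is subject to shell word
splitting, while the quoted form reaches curl as a single argument:

    # suppose ELASTIC_PASSWORD='my secret'
    curl ... -u elastic:${ELASTIC_PASSWORD} ...     # splits into `-u elastic:my` plus a stray `secret` argument
    curl ... -u "elastic:${ELASTIC_PASSWORD}" ...   # stays one argument: `elastic:my secret`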
--- docs/reference/setup/install/docker/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/install/docker/docker-compose.yml b/docs/reference/setup/install/docker/docker-compose.yml index 90f0ff363bb98..4c6ba48035b4a 100644 --- a/docs/reference/setup/install/docker/docker-compose.yml +++ b/docs/reference/setup/install/docker/docker-compose.yml @@ -53,7 +53,7 @@ services: echo "Waiting for Elasticsearch availability"; until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; echo "Setting kibana_system password"; - until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; echo "All done!"; ' healthcheck: From 8360bf9a475c9eff46884993e849ee9d0b06c9a2 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 15 Aug 2022 09:43:35 -0500 Subject: [PATCH 203/265] Fixing a version check for master stability functionality (#89322) --- docs/changelog/89322.yaml | 5 +++++ .../cluster/coordination/MasterHistoryService.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/89322.yaml diff --git a/docs/changelog/89322.yaml b/docs/changelog/89322.yaml new file mode 100644 index 0000000000000..f9b855759c503 --- /dev/null +++ b/docs/changelog/89322.yaml @@ -0,0 +1,5 @@ +pr: 89322 +summary: Fixing a version check for master stability functionality +area: Health +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java index 38bf270595d45..657c3d73216fe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java @@ -115,7 +115,7 @@ public List getRemoteMasterHistory() throws Exception { */ public void refreshRemoteMasterHistory(DiscoveryNode node) { Version minSupportedVersion = Version.V_8_4_0; - if (node.getVersion().onOrAfter(minSupportedVersion)) { // This was introduced in 8.3.0 + if (node.getVersion().before(minSupportedVersion)) { // This was introduced in 8.3.0 (and the action name changed in 8.4.0) logger.trace( "Cannot get master history for {} because it is at version {} and {} is required", node, From 3496dd5160b2504074bcf319422f4590d5e0e369 Mon Sep 17 00:00:00 2001 From: weizijun Date: Tue, 16 Aug 2022 03:25:33 +0800 Subject: [PATCH 204/265] ILM get lifecycle remove unused code (#89260) Co-authored-by: Elastic Machine --- .../xpack/ilm/action/TransportGetLifecycleAction.java | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java index 13335a4a57b36..6282d70695d24 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java +++ 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.tasks.Task; @@ -36,16 +35,13 @@ public class TransportGetLifecycleAction extends TransportMasterNodeAction { - private final MetadataIndexTemplateService templateService; - @Inject public TransportGetLifecycleAction( TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - MetadataIndexTemplateService metadataIndexTemplateService + IndexNameExpressionResolver indexNameExpressionResolver ) { super( GetLifecycleAction.NAME, @@ -58,7 +54,6 @@ public TransportGetLifecycleAction( Response::new, ThreadPool.Names.SAME ); - this.templateService = metadataIndexTemplateService; } @Override From 081185071f100f2d732cf2d30b58b3862807cfcd Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 15 Aug 2022 17:48:52 -0400 Subject: [PATCH 205/265] Enhance changelog for date_histogram speedup (#89353) See #81322 --- docs/changelog/81322.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/changelog/81322.yaml b/docs/changelog/81322.yaml index 8ebcdd39264fd..68fc8123f9b2f 100644 --- a/docs/changelog/81322.yaml +++ b/docs/changelog/81322.yaml @@ -3,3 +3,12 @@ summary: Speed counting filters/range/date_histogram aggs area: Aggregations type: enhancement issues: [] +highlight: + title: Speed up filters/range/date_histogram aggs + body: |- + This speeds up a few aggregations when they don't have child aggregations. + That's super common, for example, the histogram at the top of Kibana's + discover tab is a `date_histogram` without any child aggregations. That + particular aggregation is sped up by about 85% in our rally tests, dropping + from 250ms to 30ms. + notable: true From 2569d1f08d3435c65c03cc766cb447457424e662 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 15 Aug 2022 17:58:46 -0400 Subject: [PATCH 206/265] Docs: synthetic source doesn't dedupe numbers (#89355) The docs for synthetic `_source` incorrectly claimed that synthetic `_source` deduplicates numbers. It doesn't. The example below the prose shows it *not* removing duplicates. --- docs/reference/mapping/types/numeric.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index ee347664319c4..7eb31d8c1e4fb 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -237,7 +237,7 @@ All numeric fields except `unsigned_long` support <>, <>, or with <> disabled. -Synthetic source always sorts numeric fields and removes duplicates. For example: +Synthetic source always sorts numeric fields. 
For example: [source,console,id=synthetic-source-numeric-example] ---- PUT idx From 00d4953df5f6074831fae1a8aa63139c0f932292 Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Tue, 16 Aug 2022 08:40:10 +0200 Subject: [PATCH 207/265] [DOCS] Fixes broken example in pipeline tutorial (#89315) --- docs/reference/ingest/common-log-format-example.asciidoc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/reference/ingest/common-log-format-example.asciidoc b/docs/reference/ingest/common-log-format-example.asciidoc index fed77aac94dd0..9ee5a73ceb70d 100644 --- a/docs/reference/ingest/common-log-format-example.asciidoc +++ b/docs/reference/ingest/common-log-format-example.asciidoc @@ -13,9 +13,7 @@ The logs you want to parse look similar to this: [source,log] ---- -212.87.37.154 - - [30/May/2099:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" -200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) -AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\" +212.87.37.154 - - [05/May/2099:16:21:15 +0000] "GET /favicon.ico HTTP/1.1" 200 3638 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36" ---- // NOTCONSOLE @@ -30,7 +28,8 @@ Pipelines**. image::images/ingest/ingest-pipeline-list.png[Kibana's Ingest Pipelines list view,align="center"] . Click **Create pipeline > New pipeline**. -. Provide a name and description for the pipeline. +. Set **Name** to `my-pipeline` and optionally add a description for the +pipeline. . Add a <> to parse the log message: .. Click **Add a processor** and select the **Grok** processor type. @@ -39,7 +38,7 @@ image::images/ingest/ingest-pipeline-list.png[Kibana's Ingest Pipelines list vie + [source,grok] ---- -%{IPORHOST:source.ip} %{USER:user.id} %{USER:user.name} \\[%{HTTPDATE:@timestamp}\\] \"%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}\" %{NUMBER:http.response.status_code:int} (?:-|%{NUMBER:http.response.body.bytes:int}) %{QS:http.request.referrer} %{QS:user_agent} +%{IPORHOST:source.ip} %{USER:user.id} %{USER:user.name} \[%{HTTPDATE:@timestamp}\] "%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}" %{NUMBER:http.response.status_code:int} (?:-|%{NUMBER:http.response.body.bytes:int}) %{QS:http.request.referrer} %{QS:user_agent} ---- // NOTCONSOLE + From e4ff839e4cab6d443d1b43d10733029ae0afecd3 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 16 Aug 2022 09:27:52 +0100 Subject: [PATCH 208/265] [ML] Skip renormalization after node shutdown API called (#89347) A node can be informed that it is about to be shut down using the node shutdown API. When this happens we gracefully stop the jobs on the node and they persist state. This state persistence includes latest quantiles, and usually receipt of new quantiles triggers a renormalization. However, in the case of an impending node shutdown we do not want to be kicking off new processes that may delay the shutdown. This PR changes the anomaly detection job results processor so that it will not trigger a renormalization based on quantiles received after a node shutdown message is received. 
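In rough sketch form (simplified from the diff below, not the exact code), the new flag flows from
the job task into the results processor and gates the renormalization trigger:

    // AutodetectProcessManager, when closing a job that is not being killed:
    communicator.setVacating(jobTask.isVacating());

    // AutodetectCommunicator just forwards the flag:
    autodetectResultProcessor.setVacating(vacating);

    // AutodetectResultProcessor, when new quantiles arrive:
    persister.persistQuantiles(quantiles, this::isAlive);   // quantiles state is still persisted
    if (vacating == false && processKilled == false && renormalizer.isEnabled()) {
        // only in this case are the quantiles queued for renormalization
    }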
--- docs/changelog/89347.yaml | 5 +++ .../autodetect/AutodetectCommunicator.java | 4 +++ .../autodetect/AutodetectProcessManager.java | 1 + .../output/AutodetectResultProcessor.java | 9 +++++- .../AutodetectProcessManagerTests.java | 32 +++++++++++++++++++ 5 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/89347.yaml diff --git a/docs/changelog/89347.yaml b/docs/changelog/89347.yaml new file mode 100644 index 0000000000000..8f239d89dda6b --- /dev/null +++ b/docs/changelog/89347.yaml @@ -0,0 +1,5 @@ +pr: 89347 +summary: Skip renormalization after node shutdown API called +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java index dfcefd7560f29..97308c1d61c8b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java @@ -414,4 +414,8 @@ private void createCategorizationAnalyzer(AnalysisRegistry analysisRegistry) thr } categorizationAnalyzer = new CategorizationAnalyzer(analysisRegistry, categorizationAnalyzerConfig); } + + public void setVacating(boolean vacating) { + autodetectResultProcessor.setVacating(vacating); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 01d2e97d3b30d..9170950778444 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -901,6 +901,7 @@ private void closeProcessAndTask(ProcessContext processContext, JobTask jobTask, if (jobKilled) { communicator.killProcess(true, false, false); } else { + communicator.setVacating(jobTask.isVacating()); // communicator.close() may take a long time to run, if the job persists a large model state as a // result of calling it. We want to leave open the option to kill the job during this time, which // is why the allocation ID must remain in the map until after the close is complete. 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java index 427e11108f8da..75c001f57de02 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java @@ -95,6 +95,7 @@ public class AutodetectResultProcessor { final Semaphore updateModelSnapshotSemaphore = new Semaphore(1); private final FlushListener flushListener; private volatile boolean processKilled; + private volatile boolean vacating; private volatile boolean failed; private final Map runningForecasts; private final long priorRunsBucketCount; @@ -233,6 +234,7 @@ private void readResults() { public void setProcessKilled() { processKilled = true; + vacating = false; try { renormalizer.shutdown(); } catch (InterruptedException e) { @@ -241,6 +243,10 @@ public void setProcessKilled() { } } + public void setVacating(boolean vacating) { + this.vacating = vacating; + } + void handleOpenForecasts() { try { if (runningForecasts.isEmpty() == false) { @@ -360,7 +366,8 @@ void processResult(AutodetectResult result) { persister.persistQuantiles(quantiles, this::isAlive); bulkResultsPersister.executeRequest(); - if (processKilled == false && renormalizer.isEnabled()) { + // If a node is trying to shut down then don't trigger any further normalizations on the node + if (vacating == false && processKilled == false && renormalizer.isEnabled()) { // We need to make all results written up to these quantiles available for renormalization persister.commitResultWrites(jobId); LOGGER.debug("[{}] Quantiles queued for renormalization", jobId); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index b6fb9666ea012..35a7d9c422c10 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -420,6 +420,38 @@ public void testCloseJob() { assertEquals(1, manager.numberOfOpenJobs()); manager.closeJob(jobTask, null); assertEquals(0, manager.numberOfOpenJobs()); + verify(autodetectCommunicator).setVacating(false); + } + + public void testVacate() { + ExecutorService executorService = mock(ExecutorService.class); + doAnswer(invocationOnMock -> { + ((Runnable) invocationOnMock.getArguments()[0]).run(); + return null; + }).when(executorService).execute(any(Runnable.class)); + when(threadPool.executor(anyString())).thenReturn(executorService); + AutodetectProcessManager manager = createSpyManager(); + assertEquals(0, manager.numberOfOpenJobs()); + + JobTask jobTask = mock(JobTask.class); + when(jobTask.getJobId()).thenReturn("foo"); + when(jobTask.triggerVacate()).thenReturn(true); + manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.processData( + jobTask, + analysisRegistry, + createInputStream(""), + randomFrom(XContentType.values()), + mock(DataLoadParams.class), + (dataCounts1, e) -> {} + ); + + // job is created + assertEquals(1, 
manager.numberOfOpenJobs()); + when(jobTask.isVacating()).thenReturn(true); + manager.vacateOpenJobsOnThisNode(); + assertEquals(0, manager.numberOfOpenJobs()); + verify(autodetectCommunicator).setVacating(true); } public void testCanCloseClosingJob() throws Exception { From f87ce071388b962d79aadae19f4c4f5d896efd7e Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 16 Aug 2022 11:12:00 +0100 Subject: [PATCH 209/265] [ML] Sync changelogs between 8.4 and main (#89377) Copies a few changelog edits from the 8.4 branch into main so that people looking at the main branch see the same docs as people looking at 8.4. Relates #89078 Relates #89286 Relates #89376 --- docs/changelog/87361.yaml | 13 +++++++++++++ docs/changelog/88589.yaml | 21 +++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 docs/changelog/88589.yaml diff --git a/docs/changelog/87361.yaml b/docs/changelog/87361.yaml index fcca508ff249b..4901574f34537 100644 --- a/docs/changelog/87361.yaml +++ b/docs/changelog/87361.yaml @@ -3,4 +3,17 @@ summary: "Implement per-transform num_failure_retries setting" area: Transform type: enhancement issues: [] +highlight: + title: Infinite and adaptive retries for transforms + body: |- + Infinite and adaptive retries – available in 8.4 – makes it possible for + transforms to recover after a failure without any user intervention. Retries + can be configured per transform. The transform retries become less frequent + progressively. The interval between retries doubles after reaching a one-hour + threshold. This is because the possibility that retries solve the problem is + less likely after each failed retry. + + In the *Transforms* page in *{stack-manage-app}* in {kib}, the number of retries + can be configured when creating a new transform or editing an existing one. + notable: true diff --git a/docs/changelog/88589.yaml b/docs/changelog/88589.yaml new file mode 100644 index 0000000000000..946697743abf4 --- /dev/null +++ b/docs/changelog/88589.yaml @@ -0,0 +1,21 @@ +pr: 88589 +summary: Make composite aggs in datafeeds Generally Available +area: Machine Learning +type: feature +issues: [] +highlight: + title: Composite aggregations in datafeeds are Generally Available + body: |- + The support for + {ml-docs}/ml-configuring-aggregation.html#aggs-using-composite[composite aggregations] + in datafeeds is now generally available. + + [discrete] + [[early-stopping-dfa]] + === Optimizing speed of {dfanalytics} + {dfanalytics-cap} is even faster in 8.4. The new function automatically + stops the process of hyperparameter optimization early in case of the + accuracy gain for a different set of hyperparameter values would be + insignificant. The early stopping of the optimization process results in a + shorter runtime for the {dfanalytics-job}. + notable: true \ No newline at end of file From e7a84b159958f2496046eec2c19c37be44ad85d4 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 16 Aug 2022 21:00:20 +1000 Subject: [PATCH 210/265] Remove deprecated convenient methods from GetApiKeyRequest (#89360) A builder class for GetApiKeyRequest is added as part of #89273. As a result, the existing convenient methods got deprecated in favour of the builder. This PR removes the deprecated methods and replaces all usages with the builder. 
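Roughly, call sites migrate as follows (a sketch; the variable names are illustrative and the
"file" realm is just an example taken from the tests):

    // before: deprecated static factories, now removed
    //   GetApiKeyRequest.usingRealmName("file");
    //   GetApiKeyRequest.forOwnedApiKeys();

    // after: the builder added in #89273
    GetApiKeyRequest byRealm   = GetApiKeyRequest.builder().realmName("file").build();
    GetApiKeyRequest ownedKeys = GetApiKeyRequest.builder().ownedByAuthenticatedUser().build();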
--- .../action/apikey/GetApiKeyRequest.java | 70 ------------------- .../action/apikey/GetApiKeyRequestTests.java | 41 ++++++++--- .../authz/store/ReservedRolesStoreTests.java | 2 +- .../security/authc/ApiKeyIntegTests.java | 22 +++--- .../authc/apikey/ApiKeySingleNodeTests.java | 4 +- .../service/ElasticServiceAccountsTests.java | 12 +++- .../xpack/security/authz/RBACEngineTests.java | 4 +- 7 files changed, 58 insertions(+), 97 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java index 91dd1f937156a..572b2ab029b84 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java @@ -94,76 +94,6 @@ public boolean withLimitedBy() { return withLimitedBy; } - /** - * Creates get API key request for given realm name - * @param realmName realm name - * @return {@link GetApiKeyRequest} - */ - @Deprecated - public static GetApiKeyRequest usingRealmName(String realmName) { - return new GetApiKeyRequest(realmName, null, null, null, false, false); - } - - /** - * Creates get API key request for given user name - * @param userName user name - * @return {@link GetApiKeyRequest} - */ - @Deprecated - public static GetApiKeyRequest usingUserName(String userName) { - return new GetApiKeyRequest(null, userName, null, null, false, false); - } - - /** - * Creates get API key request for given realm and user name - * @param realmName realm name - * @param userName user name - * @return {@link GetApiKeyRequest} - */ - @Deprecated - public static GetApiKeyRequest usingRealmAndUserName(String realmName, String userName) { - return new GetApiKeyRequest(realmName, userName, null, null, false, false); - } - - /** - * Creates get API key request for given api key id - * @param apiKeyId api key id - * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else - * {@code false} - * @return {@link GetApiKeyRequest} - */ - @Deprecated - public static GetApiKeyRequest usingApiKeyId(String apiKeyId, boolean ownedByAuthenticatedUser) { - return new GetApiKeyRequest(null, null, apiKeyId, null, ownedByAuthenticatedUser, false); - } - - /** - * Creates get api key request for given api key name - * @param apiKeyName api key name - * @param ownedByAuthenticatedUser set {@code true} if the request is only for the API keys owned by current authenticated user else - * {@code false} - * @return {@link GetApiKeyRequest} - */ - public static GetApiKeyRequest usingApiKeyName(String apiKeyName, boolean ownedByAuthenticatedUser) { - return new GetApiKeyRequest(null, null, null, apiKeyName, ownedByAuthenticatedUser, false); - } - - /** - * Creates get api key request to retrieve api key information for the api keys owned by the current authenticated user. - */ - @Deprecated - public static GetApiKeyRequest forOwnedApiKeys() { - return new GetApiKeyRequest(null, null, null, null, true, false); - } - - /** - * Creates get api key request to retrieve api key information for all api keys if the authenticated user is authorized to do so. 
- */ - @Deprecated - public static GetApiKeyRequest forAllApiKeys() { - return GetApiKeyRequest.builder().build(); - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java index 989fa8093fa3e..a94351c08f4e5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java @@ -29,19 +29,22 @@ public class GetApiKeyRequestTests extends ESTestCase { public void testRequestValidation() { - GetApiKeyRequest request = GetApiKeyRequest.usingApiKeyId(randomAlphaOfLength(5), randomBoolean()); + GetApiKeyRequest request = GetApiKeyRequest.builder() + .apiKeyId(randomAlphaOfLength(5)) + .ownedByAuthenticatedUser(randomBoolean()) + .build(); ActionRequestValidationException ve = request.validate(); assertNull(ve); - request = GetApiKeyRequest.usingApiKeyName(randomAlphaOfLength(5), randomBoolean()); + request = GetApiKeyRequest.builder().apiKeyName(randomAlphaOfLength(5)).ownedByAuthenticatedUser(randomBoolean()).build(); ve = request.validate(); assertNull(ve); - request = GetApiKeyRequest.usingRealmName(randomAlphaOfLength(5)); + request = GetApiKeyRequest.builder().realmName(randomAlphaOfLength(5)).build(); ve = request.validate(); assertNull(ve); - request = GetApiKeyRequest.usingUserName(randomAlphaOfLength(5)); + request = GetApiKeyRequest.builder().userName(randomAlphaOfLength(5)).build(); ve = request.validate(); assertNull(ve); - request = GetApiKeyRequest.usingRealmAndUserName(randomAlphaOfLength(5), randomAlphaOfLength(7)); + request = GetApiKeyRequest.builder().realmName(randomAlphaOfLength(5)).userName(randomAlphaOfLength(7)).build(); ve = request.validate(); assertNull(ve); } @@ -120,9 +123,8 @@ public void writeTo(StreamOutput out) throws IOException { public void testSerialization() throws IOException { final String apiKeyId = randomAlphaOfLength(5); - final boolean ownedByAuthenticatedUser = true; - GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingApiKeyId(apiKeyId, ownedByAuthenticatedUser); { + final GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.builder().ownedByAuthenticatedUser(true).apiKeyId(apiKeyId).build(); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_3_0)); @@ -137,13 +139,34 @@ public void testSerialization() throws IOException { assertThat(requestFromInputStream.ownedByAuthenticatedUser(), is(false)); } { + final GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.builder() + .apiKeyId(apiKeyId) + .ownedByAuthenticatedUser(true) + .withLimitedBy(randomBoolean()) + .build(); + ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); + OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); + out.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.V_8_4_0)); + getApiKeyRequest.writeTo(out); + + InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); + 
inputStreamStreamInput.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.V_8_4_0)); + GetApiKeyRequest requestFromInputStream = new GetApiKeyRequest(inputStreamStreamInput); + + assertThat(requestFromInputStream.getApiKeyId(), equalTo(getApiKeyRequest.getApiKeyId())); + assertThat(requestFromInputStream.ownedByAuthenticatedUser(), is(true)); + // old version so the default for `withLimitedBy` is false + assertThat(requestFromInputStream.withLimitedBy(), is(false)); + } + { + final GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.builder().apiKeyId(apiKeyId).withLimitedBy(randomBoolean()).build(); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.CURRENT)); + out.setVersion(randomVersionBetween(random(), Version.V_8_5_0, Version.CURRENT)); getApiKeyRequest.writeTo(out); InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); - inputStreamStreamInput.setVersion(randomVersionBetween(random(), Version.V_7_4_0, Version.CURRENT)); + inputStreamStreamInput.setVersion(randomVersionBetween(random(), Version.V_8_5_0, Version.CURRENT)); GetApiKeyRequest requestFromInputStream = new GetApiKeyRequest(inputStreamStreamInput); assertThat(requestFromInputStream, equalTo(getApiKeyRequest)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 4a717c56d4510..bfef729e8262d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -416,7 +416,7 @@ public void testKibanaSystemRole() { final CreateApiKeyRequest createApiKeyRequest = new CreateApiKeyRequest(randomAlphaOfLength(8), null, null); assertThat(kibanaRole.cluster().check(CreateApiKeyAction.NAME, createApiKeyRequest, authentication), is(true)); // Can only get and query its own API keys - assertThat(kibanaRole.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.forAllApiKeys(), authentication), is(false)); + assertThat(kibanaRole.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.builder().build(), authentication), is(false)); assertThat( kibanaRole.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.builder().ownedByAuthenticatedUser().build(), authentication), is(true) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index ff20c52923402..d63b6183ffb74 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -475,7 +475,7 @@ public void testInvalidatedApiKeysDeletedByRemover() throws Exception { refreshSecurityIndex(); PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, 
GetApiKeyRequest.builder().realmName("file").build(), getApiKeyResponseListener); Set expectedKeyIds = Sets.newHashSet(createdApiKeys.get(0).getId(), createdApiKeys.get(1).getId()); boolean apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover = false; for (ApiKey apiKey : getApiKeyResponseListener.get().getApiKeyInfos()) { @@ -513,7 +513,7 @@ public void testInvalidatedApiKeysDeletedByRemover() throws Exception { // Verify that 1st invalidated API key is deleted whereas the next one may be or may not be as it depends on whether update was // indexed before ExpiredApiKeysRemover ran getApiKeyResponseListener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().realmName("file").build(), getApiKeyResponseListener); expectedKeyIds = Sets.newHashSet(createdApiKeys.get(1).getId()); apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover = false; for (ApiKey apiKey : getApiKeyResponseListener.get().getApiKeyInfos()) { @@ -558,7 +558,7 @@ public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() Instant created = Instant.now(); PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().realmName("file").build(), getApiKeyResponseListener); assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(noOfKeys)); // Expire the 1st key such that it cannot be deleted by the remover @@ -596,7 +596,7 @@ public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() // Verify get API keys does not return api keys deleted by ExpiredApiKeysRemover getApiKeyResponseListener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().realmName("file").build(), getApiKeyResponseListener); Set expectedKeyIds = Sets.newHashSet( createdApiKeys.get(0).getId(), @@ -1059,7 +1059,7 @@ public void testGetApiKeysOwnedByRunAsUserWillNotWorkWhenAuthUserInfoIsGiven() t ); getClientForRunAsUser().execute( GetApiKeyAction.INSTANCE, - GetApiKeyRequest.usingRealmAndUserName(invalidRealmAndUserPair.v1(), invalidRealmAndUserPair.v2()), + GetApiKeyRequest.builder().realmName(invalidRealmAndUserPair.v1()).userName(invalidRealmAndUserPair.v2()).build(), listener ); final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, listener::actionGet); @@ -1156,7 +1156,7 @@ public void testGetAllApiKeysFailsForUserWithNoRoleOrRetrieveOwnApiKeyRole() thr Collections.singletonMap("Authorization", basicAuthHeaderValue(withUser, TEST_PASSWORD_SECURE_STRING)) ); PlainActionFuture listener = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forAllApiKeys(), listener); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().build(), listener); ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> listener.actionGet()); assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", withUser); } @@ -1298,14 +1298,14 @@ public void testApiKeyAuthorizationApiKeyMustBeAbleToRetrieveItsOwnInformationBu final PlainActionFuture failureListener = new PlainActionFuture<>(); 
client.execute( GetApiKeyAction.INSTANCE, - GetApiKeyRequest.usingApiKeyId(responses.get(1).getId(), randomBoolean()), + GetApiKeyRequest.builder().apiKeyId(responses.get(1).getId()).ownedByAuthenticatedUser(randomBoolean()).build(), failureListener ); ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener.actionGet()); assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", ES_TEST_ROOT_USER, responses.get(0).getId()); final PlainActionFuture failureListener1 = new PlainActionFuture<>(); - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forOwnedApiKeys(), failureListener1); + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().ownedByAuthenticatedUser().build(), failureListener1); ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener1.actionGet()); assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", ES_TEST_ROOT_USER, responses.get(0).getId()); } @@ -1576,7 +1576,7 @@ public void testApiKeyRunAsAnotherUserCanCreateApiKey() { final GetApiKeyResponse getApiKeyResponse = client.execute( GetApiKeyAction.INSTANCE, - GetApiKeyRequest.usingApiKeyId(response2.getId(), true) + GetApiKeyRequest.builder().apiKeyId(response2.getId()).ownedByAuthenticatedUser(true).build() ).actionGet(); assertThat(getApiKeyResponse.getApiKeyInfos(), arrayWithSize(1)); final ApiKey apiKeyInfo = getApiKeyResponse.getApiKeyInfos()[0]; @@ -2729,7 +2729,9 @@ private void assertApiKeyNotCreated(Client client, String keyName) throws Execut new RefreshRequestBuilder(client, RefreshAction.INSTANCE).setIndices(SECURITY_MAIN_ALIAS).execute().get(); assertEquals( 0, - client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.usingApiKeyName(keyName, false)).get().getApiKeyInfos().length + client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().apiKeyName(keyName).ownedByAuthenticatedUser(false).build()) + .get() + .getApiKeyInfos().length ); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index 8e2ff0deefc5c..8448cf174ce67 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -244,7 +244,7 @@ public void testGetApiKeyWorksForTheApiKeyItself() { // Can get its own info final GetApiKeyResponse getApiKeyResponse = clientKey1.execute( GetApiKeyAction.INSTANCE, - GetApiKeyRequest.usingApiKeyId(apiKeyId, randomBoolean()) + GetApiKeyRequest.builder().apiKeyId(apiKeyId).ownedByAuthenticatedUser(randomBoolean()).build() ).actionGet(); assertThat(getApiKeyResponse.getApiKeyInfos().length, equalTo(1)); assertThat(getApiKeyResponse.getApiKeyInfos()[0].getId(), equalTo(apiKeyId)); @@ -252,7 +252,7 @@ public void testGetApiKeyWorksForTheApiKeyItself() { // Cannot get any other keys final ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> clientKey1.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.forAllApiKeys()).actionGet() + () -> clientKey1.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().build()).actionGet() ); assertThat(e.getMessage(), containsString("unauthorized for API key id [" + apiKeyId + "]")); } diff 
--git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java index 318173d76746f..3a2541dee7355 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java @@ -156,10 +156,13 @@ public void testElasticFleetServerPrivileges() { .check(CreateApiKeyAction.NAME, new CreateApiKeyRequest(randomAlphaOfLengthBetween(3, 8), null, null), authentication), is(true) ); - assertThat(role.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.forOwnedApiKeys(), authentication), is(true)); + assertThat( + role.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.builder().ownedByAuthenticatedUser().build(), authentication), + is(true) + ); assertThat(role.cluster().check(InvalidateApiKeyAction.NAME, InvalidateApiKeyRequest.forOwnedApiKeys(), authentication), is(true)); - assertThat(role.cluster().check(GetApiKeyAction.NAME, randomFrom(GetApiKeyRequest.forAllApiKeys()), authentication), is(false)); + assertThat(role.cluster().check(GetApiKeyAction.NAME, randomFrom(GetApiKeyRequest.builder().build()), authentication), is(false)); assertThat( role.cluster() .check( @@ -320,7 +323,10 @@ public void testElasticEnterpriseSearchServerAccount() { .check(CreateApiKeyAction.NAME, new CreateApiKeyRequest(randomAlphaOfLengthBetween(3, 8), null, null), authentication), is(true) ); - assertThat(role.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.forOwnedApiKeys(), authentication), is(true)); + assertThat( + role.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.builder().ownedByAuthenticatedUser().build(), authentication), + is(true) + ); assertThat(role.cluster().check(InvalidateApiKeyAction.NAME, InvalidateApiKeyRequest.forOwnedApiKeys(), authentication), is(true)); assertThat(role.cluster().check(PutUserAction.NAME, request, authentication), is(true)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index 42550d5668a03..ec4a6c7e6d0f0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -329,7 +329,7 @@ public void testSameUserPermissionDeniesSelfApiKeyInfoRetrievalWithLimitedByWhen public void testSameUserPermissionDeniesApiKeyInfoRetrievalWhenAuthenticatedByADifferentApiKey() { final User user = new User("joe"); final String apiKeyId = randomAlphaOfLengthBetween(4, 7); - final TransportRequest request = GetApiKeyRequest.usingApiKeyId(apiKeyId, false); + final TransportRequest request = GetApiKeyRequest.builder().apiKeyId(apiKeyId).ownedByAuthenticatedUser(false).build(); final Authentication authentication = AuthenticationTests.randomApiKeyAuthentication(user, randomAlphaOfLength(8)); assertFalse(RBACEngine.checkSameUserPermissions(GetApiKeyAction.NAME, request, authentication)); } @@ -337,7 +337,7 @@ public void testSameUserPermissionDeniesApiKeyInfoRetrievalWhenAuthenticatedByAD public void testSameUserPermissionDeniesApiKeyInfoRetrievalWhenLookedupByIsPresent() { final User user = new User("joe"); final String 
apiKeyId = randomAlphaOfLengthBetween(4, 7); - final TransportRequest request = GetApiKeyRequest.usingApiKeyId(apiKeyId, false); + final TransportRequest request = GetApiKeyRequest.builder().apiKeyId(apiKeyId).ownedByAuthenticatedUser(false).build(); final Authentication authentication = AuthenticationTests.randomApiKeyAuthentication(new User("not-joe"), apiKeyId) .runAs(user, new Authentication.RealmRef("name", "type", randomAlphaOfLengthBetween(3, 8))); assertFalse(RBACEngine.checkSameUserPermissions(GetApiKeyAction.NAME, request, authentication)); From 80796fba2853d8d7bf2d0f1a045544ba49afab97 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 16 Aug 2022 14:16:46 +0200 Subject: [PATCH 211/265] Small cleanups to Allocation Performance (#89378) Two fixes: 1. Looking up `Custom` values over and over for every shard incurs a measurable cost. This removes that cost for desired nodes and node shutdown metadata. 2. Node shutdown metadata logic wasn't inlining nicely because of the wrapped map. No need to be as complicated as we were in many spots, use a simple immutable map for all operations and remove a bunch of branching. --- .../cluster/metadata/Metadata.java | 4 +--- .../cluster/metadata/NodesShutdownMetadata.java | 5 ++--- .../routing/allocation/RoutingAllocation.java | 16 +++++++++++----- .../decider/NodeShutdownAllocationDecider.java | 17 ++--------------- 4 files changed, 16 insertions(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index f25599bcd4915..3a786caf6b563 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1024,9 +1024,7 @@ public Map dataStreamAliases() { } public Map nodeShutdowns() { - return Optional.ofNullable((NodesShutdownMetadata) this.custom(NodesShutdownMetadata.TYPE)) - .map(NodesShutdownMetadata::getAllNodeMetadataMap) - .orElse(Collections.emptyMap()); + return this.custom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY).getAllNodeMetadataMap(); } public Map customs() { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java index f2767162f926d..ab1f1a6812f55 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java @@ -22,7 +22,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.List; @@ -93,11 +92,11 @@ public static NodesShutdownMetadata getShutdownsOrEmpty(final ClusterState state private final Map nodes; public NodesShutdownMetadata(Map nodes) { - this.nodes = Collections.unmodifiableMap(nodes); + this.nodes = Map.copyOf(nodes); } public NodesShutdownMetadata(StreamInput in) throws IOException { - this(in.readMap(StreamInput::readString, SingleNodeShutdownMetadata::new)); + this(in.readImmutableMap(StreamInput::readString, SingleNodeShutdownMetadata::new)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index e502362aa91f6..c09901add2237 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -26,7 +26,6 @@ import org.elasticsearch.snapshots.RestoreService.RestoreInProgressUpdater; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -73,6 +72,11 @@ public class RoutingAllocation { private final Map nodeReplacementTargets; + private final Map nodeShutdowns; + + @Nullable + private final DesiredNodes desiredNodes; + public RoutingAllocation( AllocationDeciders deciders, ClusterState clusterState, @@ -106,13 +110,15 @@ public RoutingAllocation( this.clusterInfo = clusterInfo; this.shardSizeInfo = shardSizeInfo; this.currentNanoTime = currentNanoTime; + this.nodeShutdowns = clusterState.metadata().nodeShutdowns(); Map targetNameToShutdown = new HashMap<>(); - for (SingleNodeShutdownMetadata shutdown : clusterState.metadata().nodeShutdowns().values()) { + for (SingleNodeShutdownMetadata shutdown : nodeShutdowns.values()) { if (shutdown.getType() == SingleNodeShutdownMetadata.Type.REPLACE) { targetNameToShutdown.put(shutdown.getTargetNodeName(), shutdown); } } - this.nodeReplacementTargets = Collections.unmodifiableMap(targetNameToShutdown); + this.nodeReplacementTargets = Map.copyOf(targetNameToShutdown); + this.desiredNodes = DesiredNodes.latestFromClusterState(clusterState); } /** returns the nano time captured at the beginning of the allocation. used to make sure all time based decisions are aligned */ @@ -173,14 +179,14 @@ public SnapshotShardSizeInfo snapshotShardSizeInfo() { @Nullable public DesiredNodes desiredNodes() { - return DesiredNodes.latestFromClusterState(clusterState); + return desiredNodes; } /** * Returns the map of node id to shutdown metadata currently in the cluster */ public Map nodeShutdowns() { - return metadata().nodeShutdowns(); + return nodeShutdowns; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java index 0c6a481ce03eb..fecc3b814647f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java @@ -9,14 +9,11 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.core.Nullable; /** * An allocation decider that prevents shards from being allocated to a @@ -35,7 +32,7 @@ public class NodeShutdownAllocationDecider extends AllocationDecider { */ @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - final SingleNodeShutdownMetadata thisNodeShutdownMetadata = getNodeShutdownMetadata(allocation.metadata(), node.nodeId()); + final 
SingleNodeShutdownMetadata thisNodeShutdownMetadata = allocation.nodeShutdowns().get(node.nodeId()); if (thisNodeShutdownMetadata == null) { // There's no shutdown metadata for this node, return yes. @@ -73,7 +70,7 @@ public Decision canRemain(IndexMetadata indexMetadata, ShardRouting shardRouting */ @Override public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNode node, RoutingAllocation allocation) { - SingleNodeShutdownMetadata thisNodeShutdownMetadata = getNodeShutdownMetadata(allocation.metadata(), node.getId()); + SingleNodeShutdownMetadata thisNodeShutdownMetadata = allocation.nodeShutdowns().get(node.getId()); if (thisNodeShutdownMetadata == null) { return allocation.decision(Decision.YES, NAME, "node [%s] is not preparing for removal from the cluster", node.getId()); @@ -95,14 +92,4 @@ public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNod }; } - @Nullable - private static SingleNodeShutdownMetadata getNodeShutdownMetadata(Metadata metadata, String nodeId) { - NodesShutdownMetadata nodesShutdownMetadata = metadata.custom(NodesShutdownMetadata.TYPE); - if (nodesShutdownMetadata == null || nodesShutdownMetadata.getAllNodeMetadataMap() == null) { - // There are no nodes in the process of shutting down, return null. - return null; - } - - return nodesShutdownMetadata.getAllNodeMetadataMap().get(nodeId); - } } From 914e216ebdae61bd8887587e3d6c8d09debf70cc Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 16 Aug 2022 10:05:45 -0400 Subject: [PATCH 212/265] Prepare synthetic source docs for tech-preview (#89358) Now that we're releasing synthetic _source as a tech preview feature, we no longer want to remove the docs from the non-release builds. And we want to mark all of the headings describing synthetic `_source` as a preview. --- docs/reference/mapping/fields/source-field.asciidoc | 3 --- docs/reference/mapping/fields/synthetic-source.asciidoc | 2 +- .../reference/mapping/types/aggregate-metric-double.asciidoc | 5 +---- docs/reference/mapping/types/boolean.asciidoc | 4 +--- docs/reference/mapping/types/geo-point.asciidoc | 4 +--- docs/reference/mapping/types/ip.asciidoc | 5 +---- docs/reference/mapping/types/keyword.asciidoc | 5 +---- docs/reference/mapping/types/numeric.asciidoc | 5 +---- docs/reference/mapping/types/text.asciidoc | 4 +--- 9 files changed, 8 insertions(+), 29 deletions(-) diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index 76d98303dce82..f905be3d452ba 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -6,7 +6,6 @@ at index time. The `_source` field itself is not indexed (and thus is not searchable), but it is stored so that it can be returned when executing _fetch_ requests, like <> or <>. -ifeval::["{release-state}"=="unreleased"] If disk usage is important to you then have a look at <> which shrinks disk usage at the cost of only supporting a subset of mappings and slower fetches or (not recommended) @@ -14,8 +13,6 @@ only supporting a subset of mappings and slower fetches or (not recommended) usage but disables many features. 
include::synthetic-source.asciidoc[] -endif::[] - [[disable-source-field]] ==== Disabling the `_source` field diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index 32731423a4691..7a1dc89af2ede 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -1,5 +1,5 @@ [[synthetic-source]] -==== Synthetic `_source` +==== Synthetic `_source` preview:[] Though very handy to have around, the source field takes up a significant amount of space on disk. Instead of storing source documents on disk exactly as you diff --git a/docs/reference/mapping/types/aggregate-metric-double.asciidoc b/docs/reference/mapping/types/aggregate-metric-double.asciidoc index 61b4adf2fd029..55a356c6a28ac 100644 --- a/docs/reference/mapping/types/aggregate-metric-double.asciidoc +++ b/docs/reference/mapping/types/aggregate-metric-double.asciidoc @@ -252,9 +252,8 @@ The search returns the following hit. The value of the `default_metric` field, ---- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,/] -ifeval::["{release-state}"=="unreleased"] [[aggregate-metric-double-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `aggregate_metric-double` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>. @@ -301,5 +300,3 @@ Will become: } ---- // TEST[s/^/{"_source":/ s/\n$/}/] - -endif::[] diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 52fefddd0fe68..ed6e2648dee4a 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -215,9 +215,8 @@ The following parameters are accepted by `boolean` fields: Metadata about the field. -ifeval::["{release-state}"=="unreleased"] [[boolean-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `boolean` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <> or with <> disabled. @@ -249,4 +248,3 @@ Will become: } ---- // TEST[s/^/{"_source":/ s/\n$/}/] -endif::[] diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 0b866861e7365..fad74ba733cc2 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -204,9 +204,8 @@ def lat = doc['location'].lat; def lon = doc['location'].lon; -------------------------------------------------- -ifeval::["{release-state}"=="unreleased"] [[geo-point-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `geo_point` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <>, or with @@ -246,4 +245,3 @@ Will become: } ---- // TEST[s/^/{"_source":/ s/\n$/}/] -endif::[] diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 141a133184927..b35a5486906b7 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -151,9 +151,8 @@ GET my-index-000001/_search } -------------------------------------------------- -ifeval::["{release-state}"=="unreleased"] [[ip-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `ip` fields support <> in their default configuration. 
Synthetic `_source` cannot be used together with <>, <>, or with @@ -192,5 +191,3 @@ Will become: NOTE: IPv4 addresses are sorted as though they were IPv6 addresses prefixed by `::ffff:0:0:0/96` as specified by https://datatracker.ietf.org/doc/html/rfc6144[rfc6144]. - -endif::[] diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index de6080e8c1679..3e985c05e696f 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -174,9 +174,8 @@ Dimension fields have the following constraints: * The field cannot use a <>. -- -ifeval::["{release-state}"=="unreleased"] [[keyword-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `keyword` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>, a <>, @@ -212,8 +211,6 @@ Will become: ---- // TEST[s/^/{"_source":/ s/\n$/}/] -endif::[] - include::constant-keyword.asciidoc[] include::wildcard.asciidoc[] diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index 7eb31d8c1e4fb..0228430caf45d 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -229,9 +229,8 @@ endif::[] of `scaling_factor` improve accuracy but also increase space requirements. This parameter is required. -ifeval::["{release-state}"=="unreleased"] [[numeric-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] All numeric fields except `unsigned_long` support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <>, or @@ -293,5 +292,3 @@ Will become: } ---- // TEST[s/^/{"_source":/ s/\n$/}/] - -endif::[] diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 5ba3d7fbbc46d..d6c51bf81f172 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -159,9 +159,8 @@ The following parameters are accepted by `text` fields: Metadata about the field. -ifeval::["{release-state}"=="unreleased"] [[text-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `text` fields support <> if they have a `keyword` sub-field that supports synthetic `_source` and *do not* have <>. @@ -214,7 +213,6 @@ NOTE: Reordering text fields can have an effect on <> for more detail. You can avoid this by making sure the `slop` parameter on the phrase queries is lower than the `position_increment_gap`. This is the default. 
-endif::[] [[fielddata-mapping-param]] ==== `fielddata` mapping parameter From fd9473ab1c214af1e3e3fe6c7a5f584e696352fb Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 16 Aug 2022 16:00:02 +0100 Subject: [PATCH 213/265] [ML] Get categories endpoint to use ECS Grok patterns (#89386) Change the Grok pattern creator for _ml/anomaly_detectors//results/categories to always use ECS Grok patterns relates #77065 --- .../categorization/GrokPatternCreator.java | 10 +++--- .../GrokPatternCreatorTests.java | 31 ++++++++++++++++--- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java index 0fb9efa62dbb7..530c76eb52e73 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java @@ -28,6 +28,8 @@ */ public final class GrokPatternCreator { + private static final boolean ECS_COMPATIBILITY = true; + private static final Logger logger = LogManager.getLogger(GrokPatternCreator.class); private static final String PREFACE = "preface"; @@ -39,7 +41,7 @@ public final class GrokPatternCreator { * such that more generic patterns come after more specific patterns. */ private static final List ORDERED_CANDIDATE_GROK_PATTERNS = Arrays.asList( - new GrokPatternCandidate("TOMCAT_DATESTAMP", "timestamp"), + new GrokPatternCandidate("TOMCATLEGACY_DATESTAMP", "timestamp"), new GrokPatternCandidate("TIMESTAMP_ISO8601", "timestamp"), new GrokPatternCandidate("DATESTAMP_RFC822", "timestamp"), new GrokPatternCandidate("DATESTAMP_RFC2822", "timestamp"), @@ -51,8 +53,8 @@ public final class GrokPatternCreator { new GrokPatternCandidate("CISCOTIMESTAMP", "timestamp"), new GrokPatternCandidate("DATE", "date"), new GrokPatternCandidate("TIME", "time"), - new GrokPatternCandidate("LOGLEVEL", "loglevel"), - new GrokPatternCandidate("URI", "uri"), + new GrokPatternCandidate("LOGLEVEL", "log.level"), + new GrokPatternCandidate("URI", "url.original"), new GrokPatternCandidate("UUID", "uuid"), new GrokPatternCandidate("MAC", "macaddress"), // Can't use \b as the breaks, because slashes are not "word" characters @@ -284,7 +286,7 @@ static class GrokPatternCandidate { this.grokPatternName = grokPatternName; this.fieldName = fieldName; this.grok = new Grok( - Grok.getBuiltinPatterns(false), + Grok.getBuiltinPatterns(ECS_COMPATIBILITY), "%{DATA:" + PREFACE + "}" + preBreak + "%{" + grokPatternName + ":this}" + postBreak + "%{GREEDYDATA:" + EPILOGUE + "}", logger::warn ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java index 1179392f22229..52161177d7abb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java @@ -28,7 +28,7 @@ public void testBuildFieldName() { assertEquals("field3", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); assertEquals("timestamp", GrokPatternCreator.buildFieldName(fieldNameCountStore, "timestamp")); assertEquals("field4", GrokPatternCreator.buildFieldName(fieldNameCountStore, 
"field")); - assertEquals("uri", GrokPatternCreator.buildFieldName(fieldNameCountStore, "uri")); + assertEquals("url.original", GrokPatternCreator.buildFieldName(fieldNameCountStore, "url.original")); assertEquals("timestamp2", GrokPatternCreator.buildFieldName(fieldNameCountStore, "timestamp")); assertEquals("field5", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); } @@ -85,7 +85,7 @@ public void testAppendBestGrokMatchForStringsGivenTimestampsAndLogLevels() { mustMatchStrings ); - assertEquals(".+?%{TIMESTAMP_ISO8601:timestamp}.+?%{LOGLEVEL:loglevel}.+?", overallGrokPatternBuilder.toString()); + assertEquals(".+?%{TIMESTAMP_ISO8601:timestamp}.+?%{LOGLEVEL:log.level}.+?", overallGrokPatternBuilder.toString()); } public void testAppendBestGrokMatchForStringsGivenTomcatDatestamps() { @@ -110,7 +110,7 @@ public void testAppendBestGrokMatchForStringsGivenTomcatDatestamps() { mustMatchStrings ); - assertEquals(".*?%{TOMCAT_DATESTAMP:timestamp}.+?%{LOGLEVEL:loglevel}.+?", overallGrokPatternBuilder.toString()); + assertEquals(".*?%{TOMCATLEGACY_DATESTAMP:timestamp}.+?%{LOGLEVEL:log.level}.+?", overallGrokPatternBuilder.toString()); } public void testAppendBestGrokMatchForStringsGivenTrappyFloatCandidates() { @@ -252,7 +252,7 @@ public void testAppendBestGrokMatchForStringsGivenUris() { mustMatchStrings ); - assertEquals(".*?%{URI:uri}.*?", overallGrokPatternBuilder.toString()); + assertEquals(".*?%{URI:url.original}.*?", overallGrokPatternBuilder.toString()); } public void testAppendBestGrokMatchForStringsGivenPaths() { @@ -314,6 +314,29 @@ public void testFindBestGrokMatchFromExamplesGivenCatalinaLogs() { ); } + public void testFindBestGrokMatchFromExamplesGivenCatalina8Logs() { + + String regex = ".*?WARNING.+?org\\.apache\\.tomcat\\.util\\.http\\.Parameters.+?processParameters.+?Parameters.+?" + + "Invalid.+?chunk.+?ignored.*"; + // The embedded newline ensures the regular expressions we're using are compiled with Pattern.DOTALL + Collection examples = Arrays.asList( + "29-Aug-2009 12:03:33 WARNING [main] org.apache.tomcat.util.http.Parameters processParameters: Parameters: \n" + + "Invalid chunk ignored.", + "29-Aug-2009 12:03:33 WARNING [main] org.apache.tomcat.util.http.Parameters processParameters: Parameters: \n" + + "Invalid chunk ignored.", + "29-Aug-2009 12:03:33 WARNING [main] org.apache.tomcat.util.http.Parameters processParameters: Parameters: \n" + + "Invalid chunk ignored.", + "29-Aug-2009 12:03:33 WARNING [main] org.apache.tomcat.util.http.Parameters processParameters: Parameters: \n" + + "Invalid chunk ignored." + ); + + assertEquals( + ".*?%{CATALINA_DATESTAMP:timestamp}.+?WARNING.+?org\\.apache\\.tomcat\\.util\\.http\\.Parameters.+?processParameters.+?" + + "Parameters.+?Invalid.+?chunk.+?ignored.*", + GrokPatternCreator.findBestGrokMatchFromExamples("foo", regex, examples) + ); + } + public void testFindBestGrokMatchFromExamplesGivenMultiTimestampLogs() { String regex = ".*?Authpriv.+?Info.+?sshd.+?subsystem.+?request.+?for.+?sftp.*"; From ac9f12fd631c94303ce8127f104016e3550e45db Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 16 Aug 2022 09:05:01 -0600 Subject: [PATCH 214/265] Add logging in GlobalCheckpointSyncIT (#89185) The test GlobalCheckpointSyncIT#testBackgroundGlobalCheckpointSync failed once recently due to an engine already close exception. It has not occurred again and the reasoning is unclear. This commit adds a log line to indicate exactly when it happens, which shard it is, and what the current state of the index shard is. Closes #88428. 
--- .../index/seqno/GlobalCheckpointSyncIT.java | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index 5b7ba93405d61..9bf6f24a9015d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.seqno; +import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -194,12 +195,21 @@ private void runGlobalCheckpointSyncTest( for (IndexService indexService : indicesService) { for (IndexShard shard : indexService) { if (shard.routingEntry().primary()) { - final SeqNoStats seqNoStats = shard.seqNoStats(); - assertThat( - "shard " + shard.routingEntry() + " seq_no [" + seqNoStats + "]", - seqNoStats.getGlobalCheckpoint(), - equalTo(seqNoStats.getMaxSeqNo()) - ); + try { + final SeqNoStats seqNoStats = shard.seqNoStats(); + assertThat( + "shard " + shard.routingEntry() + " seq_no [" + seqNoStats + "]", + seqNoStats.getGlobalCheckpoint(), + equalTo(seqNoStats.getMaxSeqNo()) + ); + } catch (AlreadyClosedException e) { + logger.error( + "received unexpected AlreadyClosedException when fetching stats for shard: {}, shard state: {}", + shard.shardId(), + shard.state() + ); + throw e; + } } } } From b327b176530b31f386071bfab653b96d21330d5f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 16 Aug 2022 11:55:46 -0400 Subject: [PATCH 215/265] Fix shard splitting for `nested` (#89351) I broke shard splitting when `_routing` is required and you use `nested` docs. The mapping would look like this: ``` "mappings": { "_routing": { "required": true }, "properties": { "n": { "type": "nested" } } } ``` If you attempt to split an index with a mapping like this it'll blow up with an exception like this: ``` Caused by: [idx] org.elasticsearch.action.RoutingMissingException: routing is required for [idx]/[0] at org.elasticsearch.cluster.routing.IndexRouting$IdAndRoutingOnly.checkRoutingRequired(IndexRouting.java:181) at org.elasticsearch.cluster.routing.IndexRouting$IdAndRoutingOnly.getShard(IndexRouting.java:175) ``` This fixes the problem by entirely avoiding the branch of code. That branch was trying to find any top level documents that don't have a `_routing`. But we *know* that there aren't any top level documents without a routing in this case - the routing is "required". ES wouldn't have let you index any top level documents without the routing. This also adds a small pile of REST layer tests for shard splitting that hit various branches in this area. For extra paranoia. 
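The key invariant is the one described above: with `_routing` marked as required, the mapping guarantees that every top-level document carries a routing value, so a document without one can only be a nested document. A minimal sketch of that reasoning (hypothetical names, not the `ShardSplittingQuery` code; the hash is a stand-in for the real routing hash):

```
import java.util.List;
import java.util.function.ToIntFunction;

class SplitRoutingSketch {
    record Doc(String id, String routing, boolean nested) {}

    // Decide whether a top-level document belongs to the target shard of a split.
    static boolean keepInTarget(Doc doc, boolean routingRequired, int targetShard, ToIntFunction<String> shardForRouting) {
        if (doc.nested()) {
            // Nested docs have no _routing of their own; they are kept or deleted
            // together with the top-level document that contains them.
            throw new IllegalArgumentException("nested docs follow their parent");
        }
        if (doc.routing() == null && routingRequired) {
            // Impossible for top-level docs: indexing would have been rejected.
            throw new IllegalStateException("routing is required");
        }
        String effectiveRouting = doc.routing() != null ? doc.routing() : doc.id();
        return shardForRouting.applyAsInt(effectiveRouting) == targetShard;
    }

    public static void main(String[] args) {
        ToIntFunction<String> shardForRouting = r -> Math.floorMod(r.hashCode(), 4); // stand-in, not Elasticsearch's routing hash
        List<Doc> docs = List.of(new Doc("1", "1", false), new Doc("2", "2", false), new Doc("3", "3", false));
        docs.forEach(d -> System.out.println(d.id() + " stays in shard 0: " + keepInTarget(d, true, 0, shardForRouting)));
    }
}
```

Skipping the scan for routing-less top-level documents when routing is required means the split never asks for the shard of a document that has no `_routing` value, which is what tripped the `RoutingMissingException` in the trace above.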
Closes #88109 --- docs/changelog/89351.yaml | 6 + .../test/indices.split/10_basic.yml | 10 + .../40_routing_partition_size.yml | 304 ++++++++++++++++++ .../indices.split/50_routing_required.yml | 205 ++++++++++++ .../test/indices.split/60_nested.yml | 94 ++++++ .../index/shard/ShardSplittingQuery.java | 19 +- 6 files changed, 634 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/89351.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/60_nested.yml diff --git a/docs/changelog/89351.yaml b/docs/changelog/89351.yaml new file mode 100644 index 0000000000000..72b4477dad2af --- /dev/null +++ b/docs/changelog/89351.yaml @@ -0,0 +1,6 @@ +pr: 89351 +summary: Fix shard splitting for `nested` +area: Indices APIs +type: bug +issues: + - 88109 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/10_basic.yml index 067b2bb5774c3..97616684e70f9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -87,6 +87,11 @@ setup: - match: { _id: "3" } - match: { _source: { foo: "hello world 3" } } + - do: + search: + index: target + - match: { hits.total.value: 3 } + --- "Split from 1 to N": @@ -177,6 +182,11 @@ setup: - match: { _id: "3" } - match: { _source: { foo: "hello world 3" } } + - do: + search: + index: target + - match: { hits.total.value: 3 } + --- "Create illegal split indices": # try to do an illegal split with number_of_routing_shards set diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml new file mode 100644 index 0000000000000..80a8ccf0d1063 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml @@ -0,0 +1,304 @@ +more than 1: + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + index.number_of_routing_shards: 4 + index.routing_partition_size: 2 + mappings: + _routing: + required: true + + - do: + index: + index: source + id: 1 + routing: 1 + body: { "foo": "hello world" } + + - do: + index: + index: source + id: 2 + routing: 2 + body: { "foo": "hello world 2" } + + - do: + index: + index: source + id: 3 + routing: 3 + body: { "foo": "hello world 3" } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 4 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + routing: 1 + id: 1 + + - match: { _index: target } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + - do: + get: + index: target 
+ routing: 2 + id: 2 + + - match: { _index: target } + - match: { _id: "2" } + - match: { _source: { foo: "hello world 2" } } + + - do: + get: + index: target + routing: 3 + id: 3 + + - match: { _index: target } + - match: { _id: "3" } + - match: { _source: { foo: "hello world 3" } } + + - do: + search: + index: target + - match: { hits.total.value: 3 } + +--- +exactly 1: + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + index.number_of_routing_shards: 4 + index.routing_partition_size: 1 + mappings: + _routing: + required: true + + - do: + index: + index: source + id: 1 + routing: 1 + body: { "foo": "hello world" } + + - do: + index: + index: source + id: 2 + routing: 2 + body: { "foo": "hello world 2" } + + - do: + index: + index: source + id: 3 + routing: 3 + body: { "foo": "hello world 3" } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 4 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + routing: 1 + id: 1 + + - match: { _index: target } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + - do: + get: + index: target + routing: 2 + id: 2 + + - match: { _index: target } + - match: { _id: "2" } + - match: { _source: { foo: "hello world 2" } } + + - do: + get: + index: target + routing: 3 + id: 3 + + - match: { _index: target } + - match: { _id: "3" } + - match: { _source: { foo: "hello world 3" } } + + - do: + search: + index: target + - match: { hits.total.value: 3 } + +--- +nested: + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + index.number_of_routing_shards: 4 + index.routing_partition_size: 2 + mappings: + _routing: + required: true + properties: + n: + type: nested + + - do: + index: + index: source + id: 1 + routing: 1 + body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } + + - do: + index: + index: source + id: 2 + routing: 2 + body: { "foo": "hello world 2" } + + - do: + index: + index: source + id: 3 + routing: 3 + body: { "foo": "hello world 3" } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 4 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + routing: 1 + id: 1 + + - match: { _index: target } + - match: { _id: "1" } + - match: { _source: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } } + + - do: + get: + index: target + routing: 2 + id: 2 + + - match: { _index: target } + - match: { _id: "2" } + - match: { _source: { foo: "hello world 2" } } + + - do: + get: + index: target + routing: 3 + id: 3 + + - match: { _index: target } + - match: { _id: "3" } + - match: { 
_source: { foo: "hello world 3" } } + + - do: + search: + index: target + - match: { hits.total.value: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml new file mode 100644 index 0000000000000..3de89793a1b00 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml @@ -0,0 +1,205 @@ +routing required: + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + index.number_of_routing_shards: 4 + mappings: + _routing: + required: true + + - do: + index: + index: source + id: 1 + routing: 1 + body: { "foo": "hello world" } + + - do: + index: + index: source + id: 2 + routing: 2 + body: { "foo": "hello world 2" } + + - do: + index: + index: source + id: 3 + routing: 3 + body: { "foo": "hello world 3" } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 4 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + routing: 1 + id: 1 + + - match: { _index: target } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + - do: + get: + index: target + routing: 2 + id: 2 + + - match: { _index: target } + - match: { _id: "2" } + - match: { _source: { foo: "hello world 2" } } + + - do: + get: + index: target + routing: 3 + id: 3 + + - match: { _index: target } + - match: { _id: "3" } + - match: { _source: { foo: "hello world 3" } } + + - do: + search: + index: target + - match: { hits.total.value: 3 } + +--- +nested: + - skip: + version: " - 8.4.99" + reason: "fixed in 8.5.0" + + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + index.number_of_routing_shards: 4 + mappings: + _routing: + required: true + properties: + n: + type: nested + + - do: + index: + index: source + id: 1 + routing: 1 + body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } + + - do: + index: + index: source + id: 2 + routing: 2 + body: { "foo": "hello world 2" } + + - do: + index: + index: source + id: 3 + routing: 3 + body: { "foo": "hello world 3" } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 4 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + routing: 1 + id: 1 + + - match: { _index: target } + - match: { _id: "1" } + - match: { _source: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } } + + - do: + get: + index: target + routing: 2 + id: 2 + + - match: { _index: target } + - match: { _id: "2" } + - match: { _source: { foo: 
"hello world 2" } } + + - do: + get: + index: target + routing: 3 + id: 3 + + - match: { _index: target } + - match: { _id: "3" } + - match: { _source: { foo: "hello world 3" } } + + - do: + search: + index: target + - match: { hits.total.value: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/60_nested.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/60_nested.yml new file mode 100644 index 0000000000000..1bf7a82610e79 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/60_nested.yml @@ -0,0 +1,94 @@ +--- +nested: + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + index.number_of_routing_shards: 4 + mappings: + properties: + n: + type: nested + + - do: + index: + index: source + id: 1 + body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } + + - do: + index: + index: source + id: 2 + body: { "foo": "hello world 2" } + + - do: + index: + index: source + id: 3 + body: { "foo": "hello world 3" } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 4 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + id: 1 + + - match: { _index: target } + - match: { _id: "1" } + - match: { _source: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } } + + - do: + get: + index: target + id: 2 + + - match: { _index: target } + - match: { _id: "2" } + - match: { _source: { foo: "hello world 2" } } + + - do: + get: + index: target + id: 3 + + - match: { _index: target } + - match: { _id: "3" } + - match: { _source: { foo: "hello world 3" } } + + - do: + search: + index: target + - match: { hits.total.value: 3 } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index 84e8e3e521f8c..d522be8b6bf1b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -96,7 +96,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException { } if (indexMetadata.isRoutingPartitionedIndex()) { // this is the heaviest invariant. Here we have to visit all docs stored fields do extract _id and _routing - // this this index is routing partitioned. + // this index is routing partitioned. Visitor visitor = new Visitor(leafReader); TwoPhaseIterator twoPhaseIterator = parentBitSet == null ? new RoutingPartitionedDocIdSetIterator(visitor) @@ -122,10 +122,21 @@ public Scorer scorer(LeafReaderContext context) throws IOException { return shardId == targetShardId; }, leafReader, maybeWrapConsumer.apply(bitSet::set)); + // TODO have the IndexRouting build the query and pass routingRequired in + boolean routingRequired = indexMetadata.mapping() == null ? 
false : indexMetadata.mapping().routingRequired();
                 // now if we have a mixed index where some docs have a _routing value and some don't we have to exclude the ones
-                // with a routing value from the next iteration an delete / select based on the ID.
-                if (terms.getDocCount() != leafReader.maxDoc()) {
-                    // this is a special case where some of the docs have no routing values this sucks but it's possible today
+                // with a routing value from the next iteration and delete / select based on the ID.
+                if (routingRequired == false && terms.getDocCount() != leafReader.maxDoc()) {
+                    /*
+                     * This is a special case where some docs don't have routing values.
+                     * It's annoying, but it's allowed to build an index where some documents
+                     * have routing and others don't.
+                     *
+                     * Luckily, if the routing field is required in the mapping then we can
+                     * safely assume that all documents which don't have a routing are
+                     * nested documents. And we pick those up later based on the assignment
+                     * of the document that contains them.
+                     */
                     FixedBitSet hasRoutingValue = new FixedBitSet(leafReader.maxDoc());
                     findSplitDocs(RoutingFieldMapper.NAME, ref -> false, leafReader, maybeWrapConsumer.apply(hasRoutingValue::set));
                     IntConsumer bitSetConsumer = maybeWrapConsumer.apply(bitSet::set);

From d248fa4b94fef8db8a44129ed66e5143e123993d Mon Sep 17 00:00:00 2001
From: GabyCT
Date: Tue, 16 Aug 2022 13:13:14 -0500
Subject: [PATCH 216/265] [DOCS] Update pull request template URLs (#89290)

This PR updates the pull request template to refer to main instead of master, as well as updating some URLs.

Signed-off-by: Gabriela Cervantes
---
 .github/PULL_REQUEST_TEMPLATE.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 57f0992b9172d..ae934e1be5886 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -7,8 +7,8 @@ attention. -->
 - Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)?
-- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md)?
+- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/main/CONTRIBUTING.md)?
 - If submitting code, have you built your formula locally prior to submission with `gradle check`?
-- If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.
+- If submitting code, is your pull request against main? Unless there is a good reason otherwise, we prefer pull requests against main and will backport as needed.
 - If submitting code, have you checked that your submission is for an [OS and architecture that we support](https://www.elastic.co/support/matrix#show_os)?
-- If you are submitting this code for a class then read our [policy](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md#contributing-as-part-of-a-class) for that.
+- If you are submitting this code for a class then read our [policy](https://github.com/elastic/elasticsearch/blob/main/CONTRIBUTING.md#contributing-as-part-of-a-class) for that.
From 5af8ec52fe8b7e6b2a9e644f7da8d21dc3e58351 Mon Sep 17 00:00:00 2001 From: Nikola Grcevski <6207777+grcevski@users.noreply.github.com> Date: Tue, 16 Aug 2022 15:57:59 -0400 Subject: [PATCH 217/265] Support camel case dates on 7.x indices (#88914) This adds back compatibility support for camel case dates for 7.x indices used in 8.x. --- docs/changelog/88914.yaml | 6 + .../mixed_cluster/20_camel_case_on_format.yml | 19 +++ .../old_cluster/20_camel_case_on_format.yml | 79 ++++++++++ .../20_camel_case_on_format.yml | 20 +++ .../common/time/DateFormatter.java | 30 +++- .../common/time/LegacyFormatNames.java | 135 +++++++++++++++++ .../index/mapper/AbstractScriptFieldType.java | 21 ++- .../index/mapper/DateFieldMapper.java | 4 +- .../index/mapper/DateScriptFieldType.java | 20 ++- .../index/mapper/DynamicFieldsBuilder.java | 4 +- .../index/mapper/DateFieldMapperTests.java | 47 ++++++ .../mapper/DateScriptFieldTypeTests.java | 15 ++ .../xpack/deprecation/DeprecationChecks.java | 3 +- .../deprecation/IndexDeprecationChecks.java | 137 ++++++++++++++++++ .../IndexDeprecationChecksTests.java | 32 ++++ 15 files changed, 555 insertions(+), 17 deletions(-) create mode 100644 docs/changelog/88914.yaml create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml create mode 100644 qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml create mode 100644 server/src/main/java/org/elasticsearch/common/time/LegacyFormatNames.java diff --git a/docs/changelog/88914.yaml b/docs/changelog/88914.yaml new file mode 100644 index 0000000000000..af76ec186d773 --- /dev/null +++ b/docs/changelog/88914.yaml @@ -0,0 +1,6 @@ +pr: 88914 +summary: Support camel case dates on 7.x indices +area: Infra/Core +type: bug +issues: + - 84199 diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml new file mode 100644 index 0000000000000..be20cde9b6964 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml @@ -0,0 +1,19 @@ +--- +"Verify that we can still use index with camel case date field": + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "camel_case_on_format"}}' + - '{"date_field": "2019-02-01T00:00+01:00"}' + + - do: + search: + rest_total_hits_as_int: true + index: camel_case_on_format + body: + query: + range: + date_field: + gte: "2019-01-01T00:00+01:00" + lte: "2019-03-01T00:00+01:00" diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml new file mode 100644 index 0000000000000..22db10f32250e --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml @@ -0,0 +1,79 @@ +--- +"Create index with camel case on format (allowed with warning in 7.x)": + - skip: + version: "8.0.0 - " + reason: "at version 8.0.0, camel case is not allowed" + features: "warnings" + - do: + warnings: + - "Camel case format name strictDateOptionalTime is deprecated and will be removed in a future version. Use snake case name strict_date_optional_time instead." 
+ indices.create: + index: camel_case_on_format + body: + settings: + index: + number_of_replicas: 2 + mappings: + "properties": + "date_field": + "type": "date" + "format": "strictDateOptionalTime" + + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "camel_case_on_format"}}' + - '{"date_field": "2019-02-01T00:00+01:00"}' + + - do: + search: + rest_total_hits_as_int: true + index: camel_case_on_format + body: + query: + range: + date_field: + gte: "2019-01-01T00:00+01:00" + lte: "2019-03-01T00:00+01:00" + - match: { hits.total: 1 } + +--- +"Create index with camel case on format (when bwc version is > 8.0.0)": + - skip: + version: " - 7.99.99" + reason: "at version 8.0.0, camel case is not allowed" + features: "warnings" + - do: + indices.create: + index: camel_case_on_format + body: + settings: + index: + number_of_replicas: 2 + mappings: + "properties": + "date_field": + "type": "date" + "format": "strict_date_optional_time" + + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "camel_case_on_format"}}' + - '{"date_field": "2019-02-01T00:00+01:00"}' + + - do: + search: + rest_total_hits_as_int: true + index: camel_case_on_format + body: + query: + range: + date_field: + gte: "2019-01-01T00:00+01:00" + lte: "2019-03-01T00:00+01:00" + - match: { hits.total: 1 } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml new file mode 100644 index 0000000000000..680c7fb17fd5d --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml @@ -0,0 +1,20 @@ +--- +"Verify that we can use index with camel case date field in upgraded cluster": + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "camel_case_on_format"}}' + - '{"date_field": "2019-02-01T00:00+01:00"}' + + - do: + search: + rest_total_hits_as_int: true + index: camel_case_on_format + body: + query: + range: + date_field: + gte: "2019-01-01T00:00+01:00" + lte: "2019-03-01T00:00+01:00" + - match: { hits.total: 4 } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index b6b11abcfd90d..a97bd4989944a 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -8,6 +8,7 @@ package org.elasticsearch.common.time; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import java.time.Instant; @@ -18,6 +19,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.stream.Collectors; public interface DateFormatter { @@ -99,6 +101,10 @@ default String formatMillis(long millis) { DateMathParser toDateMathParser(); static DateFormatter forPattern(String input) { + return forPattern(input, Version.CURRENT); + } + + static DateFormatter forPattern(String input, Version supportedVersion) { if (Strings.hasLength(input) == false) { throw new IllegalArgumentException("No date pattern provided"); } @@ -108,13 +114,14 @@ static DateFormatter forPattern(String input) { input = input.substring(1); } - List formatters = new ArrayList<>(); - for (String pattern : Strings.delimitedListToStringArray(input, "||")) { - if (Strings.hasLength(pattern) == false) { - throw new IllegalArgumentException("Cannot have empty element in multi date 
format pattern: " + input); + List patterns = splitCombinedPatterns(input); + List formatters = patterns.stream().map(p -> { + // make sure we still support camel case for indices created before 8.0 + if (supportedVersion.before(Version.V_8_0_0)) { + return LegacyFormatNames.camelCaseToSnakeCase(p); } - formatters.add(DateFormatters.forPattern(pattern)); - } + return p; + }).map(DateFormatters::forPattern).collect(Collectors.toList()); if (formatters.size() == 1) { return formatters.get(0); @@ -122,4 +129,15 @@ static DateFormatter forPattern(String input) { return JavaDateFormatter.combined(input, formatters); } + + static List splitCombinedPatterns(String input) { + List patterns = new ArrayList<>(); + for (String pattern : Strings.delimitedListToStringArray(input, "||")) { + if (Strings.hasLength(pattern) == false) { + throw new IllegalArgumentException("Cannot have empty element in multi date format pattern: " + input); + } + patterns.add(pattern); + } + return patterns; + } } diff --git a/server/src/main/java/org/elasticsearch/common/time/LegacyFormatNames.java b/server/src/main/java/org/elasticsearch/common/time/LegacyFormatNames.java new file mode 100644 index 0000000000000..c6a233951d094 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/LegacyFormatNames.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.time; + +import java.util.Arrays; +import java.util.Map; +import java.util.stream.Collectors; + +public enum LegacyFormatNames { + ISO8601(null, "iso8601"), + BASIC_DATE("basicDate", "basic_date"), + BASIC_DATE_TIME("basicDateTime", "basic_date_time"), + BASIC_DATE_TIME_NO_MILLIS("basicDateTimeNoMillis", "basic_date_time_no_millis"), + BASIC_ORDINAL_DATE("basicOrdinalDate", "basic_ordinal_date"), + BASIC_ORDINAL_DATE_TIME("basicOrdinalDateTime", "basic_ordinal_date_time"), + BASIC_ORDINAL_DATE_TIME_NO_MILLIS("basicOrdinalDateTimeNoMillis", "basic_ordinal_date_time_no_millis"), + BASIC_TIME("basicTime", "basic_time"), + BASIC_TIME_NO_MILLIS("basicTimeNoMillis", "basic_time_no_millis"), + BASIC_T_TIME("basicTTime", "basic_t_time"), + BASIC_T_TIME_NO_MILLIS("basicTTimeNoMillis", "basic_t_time_no_millis"), + BASIC_WEEK_DATE("basicWeekDate", "basic_week_date"), + BASIC_WEEK_DATE_TIME("basicWeekDateTime", "basic_week_date_time"), + BASIC_WEEK_DATE_TIME_NO_MILLIS("basicWeekDateTimeNoMillis", "basic_week_date_time_no_millis"), + DATE(null, "date"), + DATE_HOUR("dateHour", "date_hour"), + DATE_HOUR_MINUTE("dateHourMinute", "date_hour_minute"), + DATE_HOUR_MINUTE_SECOND("dateHourMinuteSecond", "date_hour_minute_second"), + DATE_HOUR_MINUTE_SECOND_FRACTION("dateHourMinuteSecondFraction", "date_hour_minute_second_fraction"), + DATE_HOUR_MINUTE_SECOND_MILLIS("dateHourMinuteSecondMillis", "date_hour_minute_second_millis"), + DATE_OPTIONAL_TIME("dateOptionalTime", "date_optional_time"), + DATE_TIME("dateTime", "date_time"), + DATE_TIME_NO_MILLIS("dateTimeNoMillis", "date_time_no_millis"), + HOUR(null, "hour"), + HOUR_MINUTE("hourMinute", "hour_minute"), + HOUR_MINUTE_SECOND("hourMinuteSecond", "hour_minute_second"), + HOUR_MINUTE_SECOND_FRACTION("hourMinuteSecondFraction", "hour_minute_second_fraction"), + 
HOUR_MINUTE_SECOND_MILLIS("hourMinuteSecondMillis", "hour_minute_second_millis"), + ORDINAL_DATE("ordinalDate", "ordinal_date"), + ORDINAL_DATE_TIME("ordinalDateTime", "ordinal_date_time"), + ORDINAL_DATE_TIME_NO_MILLIS("ordinalDateTimeNoMillis", "ordinal_date_time_no_millis"), + TIME(null, "time"), + TIME_NO_MILLIS("timeNoMillis", "time_no_millis"), + T_TIME("tTime", "t_time"), + T_TIME_NO_MILLIS("tTimeNoMillis", "t_time_no_millis"), + WEEK_DATE("weekDate", "week_date"), + WEEK_DATE_TIME("weekDateTime", "week_date_time"), + WEEK_DATE_TIME_NO_MILLIS("weekDateTimeNoMillis", "week_date_time_no_millis"), + WEEK_YEAR(null, "week_year"), + WEEKYEAR(null, "weekyear"), + WEEK_YEAR_WEEK("weekyearWeek", "weekyear_week"), + WEEKYEAR_WEEK_DAY("weekyearWeekDay", "weekyear_week_day"), + YEAR(null, "year"), + YEAR_MONTH("yearMonth", "year_month"), + YEAR_MONTH_DAY("yearMonthDay", "year_month_day"), + EPOCH_SECOND(null, "epoch_second"), + EPOCH_MILLIS(null, "epoch_millis"), + // strict date formats here, must be at least 4 digits for year and two for months and two for day + STRICT_BASIC_WEEK_DATE("strictBasicWeekDate", "strict_basic_week_date"), + STRICT_BASIC_WEEK_DATE_TIME("strictBasicWeekDateTime", "strict_basic_week_date_time"), + STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS("strictBasicWeekDateTimeNoMillis", "strict_basic_week_date_time_no_millis"), + STRICT_DATE("strictDate", "strict_date"), + STRICT_DATE_HOUR("strictDateHour", "strict_date_hour"), + STRICT_DATE_HOUR_MINUTE("strictDateHourMinute", "strict_date_hour_minute"), + STRICT_DATE_HOUR_MINUTE_SECOND("strictDateHourMinuteSecond", "strict_date_hour_minute_second"), + STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION("strictDateHourMinuteSecondFraction", "strict_date_hour_minute_second_fraction"), + STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS("strictDateHourMinuteSecondMillis", "strict_date_hour_minute_second_millis"), + STRICT_DATE_OPTIONAL_TIME("strictDateOptionalTime", "strict_date_optional_time"), + STRICT_DATE_OPTIONAL_TIME_NANOS("strictDateOptionalTimeNanos", "strict_date_optional_time_nanos"), + STRICT_DATE_TIME("strictDateTime", "strict_date_time"), + STRICT_DATE_TIME_NO_MILLIS("strictDateTimeNoMillis", "strict_date_time_no_millis"), + STRICT_HOUR("strictHour", "strict_hour"), + STRICT_HOUR_MINUTE("strictHourMinute", "strict_hour_minute"), + STRICT_HOUR_MINUTE_SECOND("strictHourMinuteSecond", "strict_hour_minute_second"), + STRICT_HOUR_MINUTE_SECOND_FRACTION("strictHourMinuteSecondFraction", "strict_hour_minute_second_fraction"), + STRICT_HOUR_MINUTE_SECOND_MILLIS("strictHourMinuteSecondMillis", "strict_hour_minute_second_millis"), + STRICT_ORDINAL_DATE("strictOrdinalDate", "strict_ordinal_date"), + STRICT_ORDINAL_DATE_TIME("strictOrdinalDateTime", "strict_ordinal_date_time"), + STRICT_ORDINAL_DATE_TIME_NO_MILLIS("strictOrdinalDateTimeNoMillis", "strict_ordinal_date_time_no_millis"), + STRICT_TIME("strictTime", "strict_time"), + STRICT_TIME_NO_MILLIS("strictTimeNoMillis", "strict_time_no_millis"), + STRICT_T_TIME("strictTTime", "strict_t_time"), + STRICT_T_TIME_NO_MILLIS("strictTTimeNoMillis", "strict_t_time_no_millis"), + STRICT_WEEK_DATE("strictWeekDate", "strict_week_date"), + STRICT_WEEK_DATE_TIME("strictWeekDateTime", "strict_week_date_time"), + STRICT_WEEK_DATE_TIME_NO_MILLIS("strictWeekDateTimeNoMillis", "strict_week_date_time_no_millis"), + STRICT_WEEKYEAR("strictWeekyear", "strict_weekyear"), + STRICT_WEEKYEAR_WEEK("strictWeekyearWeek", "strict_weekyear_week"), + STRICT_WEEKYEAR_WEEK_DAY("strictWeekyearWeekDay", "strict_weekyear_week_day"), + 
STRICT_YEAR("strictYear", "strict_year"), + STRICT_YEAR_MONTH("strictYearMonth", "strict_year_month"), + STRICT_YEAR_MONTH_DAY("strictYearMonthDay", "strict_year_month_day"); + + private static final Map ALL_NAMES = Arrays.stream(values()) + .filter(n -> n.camelCaseName != null) + .collect(Collectors.toMap(n -> n.camelCaseName, n -> n.snakeCaseName)); + + private final String camelCaseName; + private final String snakeCaseName; + + LegacyFormatNames(String camelCaseName, String snakeCaseName) { + this.camelCaseName = camelCaseName; + this.snakeCaseName = snakeCaseName; + } + + public static LegacyFormatNames forName(String format) { + for (var name : values()) { + if (name.matches(format)) { + return name; + } + } + return null; + } + + public boolean isCamelCase(String format) { + return format.equals(camelCaseName); + } + + public String getSnakeCaseName() { + return snakeCaseName; + } + + public boolean matches(String format) { + return format.equals(camelCaseName) || format.equals(snakeCaseName); + } + + public static String camelCaseToSnakeCase(String format) { + return ALL_NAMES.getOrDefault(format, format); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java index 187d148387dff..aa0e0c17a52b7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java @@ -14,6 +14,7 @@ import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.unit.Fuzziness; @@ -236,10 +237,10 @@ abstract static class Builder extends RuntimeField.Builder { @Override protected final RuntimeField createRuntimeField(MappingParserContext parserContext) { if (script.get() == null) { - return createRuntimeField(getParseFromSourceFactory()); + return createRuntimeField(getParseFromSourceFactory(), parserContext.indexVersionCreated()); } Factory factory = parserContext.scriptCompiler().compile(script.getValue(), scriptContext); - return createRuntimeField(factory); + return createRuntimeField(factory, parserContext.indexVersionCreated()); } @Override @@ -262,12 +263,26 @@ protected final RuntimeField createChildRuntimeField( } final RuntimeField createRuntimeField(Factory scriptFactory) { - AbstractScriptFieldType fieldType = createFieldType(name, scriptFactory, getScript(), meta()); + return createRuntimeField(scriptFactory, Version.CURRENT); + } + + final RuntimeField createRuntimeField(Factory scriptFactory, Version indexVersion) { + var fieldType = createFieldType(name, scriptFactory, getScript(), meta(), indexVersion); return new LeafRuntimeField(name, fieldType, getParameters()); } abstract AbstractScriptFieldType createFieldType(String name, Factory factory, Script script, Map meta); + AbstractScriptFieldType createFieldType( + String name, + Factory factory, + Script script, + Map meta, + Version supportedVersion + ) { + return createFieldType(name, factory, script, meta); + } + @Override protected List> getParameters() { List> parameters = new ArrayList<>(super.getParameters()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 873af063ae8e3..22ef8c1bc3b20 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -281,9 +281,9 @@ public Builder( } } - private DateFormatter buildFormatter() { + DateFormatter buildFormatter() { try { - return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); + return DateFormatter.forPattern(format.getValue(), indexCreatedVersion).withLocale(locale.getValue()); } catch (IllegalArgumentException e) { if (indexCreatedVersion.isLegacyIndexVersion()) { logger.warn(() -> "Error parsing format [" + format.getValue() + "] of legacy index, falling back to default", e); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java index 86a0d7d2c877f..f6da438283d9e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; @@ -87,13 +88,24 @@ protected List> getParameters() { } @Override - AbstractScriptFieldType createFieldType(String name, DateFieldScript.Factory factory, Script script, Map meta) { + AbstractScriptFieldType createFieldType( + String name, + DateFieldScript.Factory factory, + Script script, + Map meta, + Version supportedVersion + ) { String pattern = format.getValue() == null ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.pattern() : format.getValue(); Locale locale = this.locale.getValue() == null ? 
Locale.ROOT : this.locale.getValue(); - DateFormatter dateTimeFormatter = DateFormatter.forPattern(pattern).withLocale(locale); + DateFormatter dateTimeFormatter = DateFormatter.forPattern(pattern, supportedVersion).withLocale(locale); return new DateScriptFieldType(name, factory, dateTimeFormatter, script, meta); } + @Override + AbstractScriptFieldType createFieldType(String name, DateFieldScript.Factory factory, Script script, Map meta) { + return createFieldType(name, factory, script, meta, Version.CURRENT); + } + @Override DateFieldScript.Factory getParseFromSourceFactory() { return DateFieldScript.PARSE_FROM_SOURCE; @@ -105,10 +117,10 @@ DateFieldScript.Factory getCompositeLeafFactory(Function formatParam = (FieldMapper.Parameter) builder.getParameters()[3]; + formatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime"); + builder.buildFormatter(); // shouldn't throw exception + + formatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime||strictDateOptionalTimeNanos"); + builder.buildFormatter(); // shouldn't throw exception + + DateFieldMapper.Builder newFieldBuilder = new DateFieldMapper.Builder( + "format", + DateFieldMapper.Resolution.MILLISECONDS, + null, + mock(ScriptService.class), + true, + Version.CURRENT + ); + + @SuppressWarnings("unchecked") + final FieldMapper.Parameter newFormatParam = (FieldMapper.Parameter) newFieldBuilder.getParameters()[3]; + + // Check that we don't allow the use of camel case date formats on 8.x indices + assertEquals( + "Error parsing [format] on field [format]: Invalid format: [strictDateOptionalTime]: Unknown pattern letter: t", + expectThrows(IllegalArgumentException.class, () -> { + newFormatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime"); + assertEquals("strictDateOptionalTime", newFormatParam.getValue()); + newFieldBuilder.buildFormatter(); + }).getMessage() + ); + + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index 8fa5b95c93b98..cc5537decf2ad 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -431,6 +431,21 @@ public void testTermsQuery() throws IOException { } } + public void testLegacyDateFormatName() throws IOException { + CheckedSupplier mapping = () -> runtimeFieldMapping(b -> { + minimalMapping(b); + b.field("format", "strictDateOptionalTime"); + }); + // Check that we can correctly use the camel case date format for 7.x indices + createMapperService(Version.fromId(7_99_99_99), mapping.get()); // no exception thrown + + // Check that we don't allow the use of camel case date formats on 8.x indices + assertEquals( + "Failed to parse mapping: Invalid format: [strictDateOptionalTime]: Unknown pattern letter: t", + expectThrows(MapperParsingException.class, () -> { createMapperService(mapping.get()); }).getMessage() + ); + } + @Override protected Query randomTermsQuery(MappedFieldType ft, SearchExecutionContext ctx) { return ft.termsQuery(randomList(1, 100, DateScriptFieldTypeTests::randomDate), ctx); diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 9d34d98d5ffd4..5e7e90e37d327 100644 
--- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -99,7 +99,8 @@ private DeprecationChecks() {} IndexDeprecationChecks::translogRetentionSettingCheck, IndexDeprecationChecks::checkIndexDataPath, IndexDeprecationChecks::storeTypeSettingCheck, - IndexDeprecationChecks::frozenIndexSettingCheck + IndexDeprecationChecks::frozenIndexSettingCheck, + IndexDeprecationChecks::deprecatedCamelCasePattern ); /** diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index f41f4481ba74b..75b1bff558fb4 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -8,6 +8,9 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MappingMetadata; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.LegacyFormatNames; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.frozen.FrozenEngine; @@ -17,6 +20,10 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; /** * Index-specific deprecation checks @@ -115,4 +122,134 @@ static DeprecationIssue frozenIndexSettingCheck(IndexMetadata indexMetadata) { } return null; } + + private static void fieldLevelMappingIssue(IndexMetadata indexMetadata, BiConsumer> checker) { + if (indexMetadata.mapping() != null) { + Map sourceAsMap = indexMetadata.mapping().sourceAsMap(); + checker.accept(indexMetadata.mapping(), sourceAsMap); + } + } + + /** + * iterates through the "properties" field of mappings and returns any predicates that match in the + * form of issue-strings. 
+ * + * @param type the document type + * @param parentMap the mapping to read properties from + * @param predicate the predicate to check against for issues, issue is returned if predicate evaluates to true + * @param fieldFormatter a function that takes a type and mapping field entry and returns a formatted field representation + * @return a list of issues found in fields + */ + @SuppressWarnings("unchecked") + static List findInPropertiesRecursively( + String type, + Map parentMap, + Function, Boolean> predicate, + BiFunction, String> fieldFormatter, + String fieldBeginMarker, + String fieldEndMarker + ) { + List issues = new ArrayList<>(); + Map properties = (Map) parentMap.get("properties"); + if (properties == null) { + return issues; + } + for (Map.Entry entry : properties.entrySet()) { + Map valueMap = (Map) entry.getValue(); + if (predicate.apply(valueMap)) { + issues.add(fieldBeginMarker + fieldFormatter.apply(type, entry) + fieldEndMarker); + } + + Map values = (Map) valueMap.get("fields"); + if (values != null) { + for (Map.Entry multifieldEntry : values.entrySet()) { + Map multifieldValueMap = (Map) multifieldEntry.getValue(); + if (predicate.apply(multifieldValueMap)) { + issues.add( + fieldBeginMarker + + fieldFormatter.apply(type, entry) + + ", multifield: " + + multifieldEntry.getKey() + + fieldEndMarker + ); + } + if (multifieldValueMap.containsKey("properties")) { + issues.addAll( + findInPropertiesRecursively( + type, + multifieldValueMap, + predicate, + fieldFormatter, + fieldBeginMarker, + fieldEndMarker + ) + ); + } + } + } + if (valueMap.containsKey("properties")) { + issues.addAll(findInPropertiesRecursively(type, valueMap, predicate, fieldFormatter, fieldBeginMarker, fieldEndMarker)); + } + } + + return issues; + } + + static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata) { + List fields = new ArrayList<>(); + fieldLevelMappingIssue( + indexMetadata, + ((mappingMetadata, sourceAsMap) -> fields.addAll( + findInPropertiesRecursively( + mappingMetadata.type(), + sourceAsMap, + IndexDeprecationChecks::isDateFieldWithCamelCasePattern, + IndexDeprecationChecks::changeFormatToSnakeCase, + "", + "" + ) + )) + ); + + if (fields.size() > 0) { + String detailsMessageBeginning = fields.stream().collect(Collectors.joining(" ")); + return new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Date fields use deprecated camel case formats", + "https://ela.st/es-deprecation-7-camel-case-format", + detailsMessageBeginning, + false, + null + ); + } + return null; + } + + private static boolean isDateFieldWithCamelCasePattern(Map property) { + if ("date".equals(property.get("type")) && property.containsKey("format")) { + List patterns = DateFormatter.splitCombinedPatterns((String) property.get("format")); + for (String pattern : patterns) { + LegacyFormatNames format = LegacyFormatNames.forName(pattern); + return format != null && format.isCamelCase(pattern); + } + } + return false; + } + + private static String changeFormatToSnakeCase(String type, Map.Entry entry) { + Map value = (Map) entry.getValue(); + final String formatFieldValue = (String) value.get("format"); + List patterns = DateFormatter.splitCombinedPatterns(formatFieldValue); + StringBuilder sb = new StringBuilder( + "Convert [" + entry.getKey() + "] format [" + formatFieldValue + "] " + "which contains deprecated camel case to snake case. 
" + ); + for (String pattern : patterns) { + LegacyFormatNames format = LegacyFormatNames.forName(pattern); + if (format != null && format.isCamelCase(pattern)) { + sb.append("[" + pattern + "] to [" + format.getSnakeCaseName() + "]. "); + } + } + sb.deleteCharAt(sb.length() - 1); + return sb.toString(); + } } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 4cddab7e21d9a..3d5f0dcecc713 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -16,11 +16,13 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; +import java.io.IOException; import java.util.List; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.collection.IsIterableContainingInOrder.contains; public class IndexDeprecationChecksTests extends ESTestCase { @@ -145,4 +147,34 @@ public void testFrozenIndex() { ) ); } + + public void testCamelCaseDeprecation() throws IOException { + String simpleMapping = "{\n\"_doc\": {" + + "\"properties\" : {\n" + + " \"date_time_field\" : {\n" + + " \"type\" : \"date\",\n" + + " \"format\" : \"strictDateOptionalTime\"\n" + + " }\n" + + " }" + + "} }"; + + IndexMetadata simpleIndex = IndexMetadata.builder(randomAlphaOfLengthBetween(5, 10)) + .settings(settings(Version.V_7_0_0)) + .numberOfShards(1) + .numberOfReplicas(1) + .putMapping(simpleMapping) + .build(); + + DeprecationIssue expected = new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Date fields use deprecated camel case formats", + "https://ela.st/es-deprecation-7-camel-case-format", + "Convert [date_time_field] format [strictDateOptionalTime] " + + "which contains deprecated camel case to snake case. [strictDateOptionalTime] to [strict_date_optional_time].", + false, + null + ); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(simpleIndex)); + assertThat(issues, hasItem(expected)); + } } From 82ad45f411b75cf71060b64f8e7ea99bb2a74d7c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 16 Aug 2022 16:17:45 -0400 Subject: [PATCH 218/265] TSDB: Build `_id` without reparsing (#88789) This replaces the code that build `_id` in tsid indices that used to re-parse the entire json object with one that reuses the parsed values. It speed up writes by about 4%. 
Here's the rally output: ``` | Min Throughput | 8164.67 | 8547.24 | docs/s | +4.69% | | Mean Throughput | 8891.11 | 9256.75 | docs/s | +4.11% | | Median Throughput | 8774.52 | 9134.15 | docs/s | +4.10% | | Max Throughput | 10246.7 | 10482.3 | docs/s | +2.30% | ``` --- docs/changelog/88789.yaml | 5 + .../cluster/routing/IndexRouting.java | 147 ++++++++++-------- .../org/elasticsearch/index/IndexMode.java | 10 +- .../index/mapper/DocumentParserContext.java | 2 +- .../index/mapper/TimeSeriesIdFieldMapper.java | 19 ++- .../mapper/TsidExtractingIdFieldMapper.java | 16 +- .../elasticsearch/search/DocValueFormat.java | 2 +- .../cluster/routing/IndexRoutingTests.java | 10 +- .../timeseries/TimeSeriesAggregatorTests.java | 2 +- 9 files changed, 134 insertions(+), 79 deletions(-) create mode 100644 docs/changelog/88789.yaml diff --git a/docs/changelog/88789.yaml b/docs/changelog/88789.yaml new file mode 100644 index 0000000000000..7ef280ef7793d --- /dev/null +++ b/docs/changelog/88789.yaml @@ -0,0 +1,5 @@ +pr: 88789 +summary: "TSDB: Build `_id` without reparsing" +area: "TSDB" +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index f9eec410ed9f7..11440ffeca90a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -35,6 +35,7 @@ import java.util.Map; import java.util.Set; import java.util.function.IntConsumer; +import java.util.function.IntSupplier; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -251,26 +252,33 @@ public void process(IndexRequest indexRequest) {} public int indexShard(String id, @Nullable String routing, XContentType sourceType, BytesReference source) { assert Transports.assertNotTransportThread("parsing the _source can get slow"); checkNoRouting(routing); - return hashToShardId(hashSource(sourceType, source)); + return hashToShardId(hashSource(sourceType, source).buildHash(IndexRouting.ExtractFromSource::defaultOnEmpty)); } public String createId(XContentType sourceType, BytesReference source, byte[] suffix) { - return createId(hashSource(sourceType, source), suffix); + return hashSource(sourceType, source).createId(suffix, IndexRouting.ExtractFromSource::defaultOnEmpty); } public String createId(Map flat, byte[] suffix) { - return createId(hashSource(flat), suffix); + Builder b = builder(); + for (Map.Entry e : flat.entrySet()) { + if (Regex.simpleMatch(routingPaths, e.getKey())) { + b.hashes.add(new NameAndHash(new BytesRef(e.getKey()), hash(new BytesRef(e.getValue().toString())))); + } + } + return b.createId(suffix, IndexRouting.ExtractFromSource::defaultOnEmpty); } - private static String createId(int routingHash, byte[] suffix) { - byte[] idBytes = new byte[4 + suffix.length]; - ByteUtils.writeIntLE(routingHash, idBytes, 0); - System.arraycopy(suffix, 0, idBytes, 4, suffix.length); - return Base64.getUrlEncoder().withoutPadding().encodeToString(idBytes); + private static int defaultOnEmpty() { + throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields"); } - private int hashSource(XContentType sourceType, BytesReference source) { - List hashes = new ArrayList<>(); + public Builder builder() { + return new Builder(); + } + + private Builder hashSource(XContentType sourceType, BytesReference source) { + Builder b = builder(); try { try 
(XContentParser parser = sourceType.xContent().createParser(parserConfig, source.streamInput())) { parser.nextToken(); // Move to first token @@ -278,82 +286,89 @@ private int hashSource(XContentType sourceType, BytesReference source) { throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields"); } parser.nextToken(); - extractObject(hashes, null, parser); + b.extractObject(null, parser); ensureExpectedToken(null, parser.nextToken(), parser); } } catch (IOException | ParsingException e) { throw new IllegalArgumentException("Error extracting routing: " + e.getMessage(), e); } - return hashesToHash(hashes); + return b; } - private static void extractObject(List hashes, @Nullable String path, XContentParser source) throws IOException { - while (source.currentToken() != Token.END_OBJECT) { - ensureExpectedToken(Token.FIELD_NAME, source.currentToken(), source); - String fieldName = source.currentName(); - String subPath = path == null ? fieldName : path + "." + fieldName; - source.nextToken(); - extractItem(hashes, subPath, source); + public class Builder { + private final List hashes = new ArrayList<>(); + + public void addMatching(String fieldName, BytesRef string) { + if (Regex.simpleMatch(routingPaths, fieldName)) { + hashes.add(new NameAndHash(new BytesRef(fieldName), hash(string))); + } } - } - private static void extractItem(List hashes, String path, XContentParser source) throws IOException { - switch (source.currentToken()) { - case START_OBJECT: - source.nextToken(); - extractObject(hashes, path, source); - source.nextToken(); - break; - case VALUE_STRING: - hashes.add(new NameAndHash(new BytesRef(path), hash(new BytesRef(source.text())))); - source.nextToken(); - break; - case VALUE_NULL: + public String createId(byte[] suffix, IntSupplier onEmpty) { + byte[] idBytes = new byte[4 + suffix.length]; + ByteUtils.writeIntLE(buildHash(onEmpty), idBytes, 0); + System.arraycopy(suffix, 0, idBytes, 4, suffix.length); + return Base64.getUrlEncoder().withoutPadding().encodeToString(idBytes); + } + + private void extractObject(@Nullable String path, XContentParser source) throws IOException { + while (source.currentToken() != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, source.currentToken(), source); + String fieldName = source.currentName(); + String subPath = path == null ? fieldName : path + "." 
+ fieldName; source.nextToken(); - break; - default: - throw new ParsingException( - source.getTokenLocation(), - "Routing values must be strings but found [{}]", - source.currentToken() - ); + extractItem(subPath, source); + } } - } - private int hashSource(Map flat) { - List hashes = new ArrayList<>(); - for (Map.Entry e : flat.entrySet()) { - if (Regex.simpleMatch(routingPaths, e.getKey())) { - hashes.add(new NameAndHash(new BytesRef(e.getKey()), hash(new BytesRef(e.getValue().toString())))); + private void extractItem(String path, XContentParser source) throws IOException { + switch (source.currentToken()) { + case START_OBJECT: + source.nextToken(); + extractObject(path, source); + source.nextToken(); + break; + case VALUE_STRING: + hashes.add(new NameAndHash(new BytesRef(path), hash(new BytesRef(source.text())))); + source.nextToken(); + break; + case VALUE_NULL: + source.nextToken(); + break; + default: + throw new ParsingException( + source.getTokenLocation(), + "Routing values must be strings but found [{}]", + source.currentToken() + ); + } + } + + private int buildHash(IntSupplier onEmpty) { + Collections.sort(hashes); + Iterator itr = hashes.iterator(); + if (itr.hasNext() == false) { + return onEmpty.getAsInt(); } + NameAndHash prev = itr.next(); + int hash = hash(prev.name) ^ prev.hash; + while (itr.hasNext()) { + NameAndHash next = itr.next(); + if (prev.name.equals(next.name)) { + throw new IllegalArgumentException("Duplicate routing dimension for [" + next.name + "]"); + } + int thisHash = hash(next.name) ^ next.hash; + hash = 31 * hash + thisHash; + prev = next; + } + return hash; } - return hashesToHash(hashes); } private static int hash(BytesRef ref) { return StringHelper.murmurhash3_x86_32(ref, 0); } - private static int hashesToHash(List hashes) { - Collections.sort(hashes); - Iterator itr = hashes.iterator(); - if (itr.hasNext() == false) { - throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields"); - } - NameAndHash prev = itr.next(); - int hash = hash(prev.name) ^ prev.hash; - while (itr.hasNext()) { - NameAndHash next = itr.next(); - if (prev.name.equals(next.name)) { - throw new IllegalArgumentException("Duplicate routing dimension for [" + next.name + "]"); - } - int thisHash = hash(next.name) ^ next.hash; - hash = 31 * hash + thisHash; - prev = next; - } - return hash; - } - @Override public int updateShard(String id, @Nullable String routing) { throw new IllegalArgumentException(error("update")); diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 7bfe6e48d3a1f..71a824535392e 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; +import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -101,7 +102,7 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions() { + public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { return new DocumentDimensions.OnlySingleValueAllowed(); } @@ -186,8 +187,9 @@ public IdFieldMapper 
buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions() { - return new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(); + public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { + IndexRouting.ExtractFromSource routing = (IndexRouting.ExtractFromSource) settings.getIndexRouting(); + return new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routing.builder()); } @Override @@ -301,7 +303,7 @@ public String getName() { /** * How {@code time_series_dimension} fields are handled by indices in this mode. */ - public abstract DocumentDimensions buildDocumentDimensions(); + public abstract DocumentDimensions buildDocumentDimensions(IndexSettings settings); /** * @return Whether timestamps should be validated for being withing the time range of an index. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index da4b8673c362b..600f45e83958d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -127,7 +127,7 @@ protected DocumentParserContext( this.newFieldsSeen = new HashSet<>(); this.dynamicObjectMappers = new HashMap<>(); this.dynamicRuntimeFields = new ArrayList<>(); - this.dimensions = indexSettings.getMode().buildDocumentDimensions(); + this.dimensions = indexSettings.getMode().buildDocumentDimensions(indexSettings); } public final IndexSettings indexSettings() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index 2826928274241..dd8bf4f9ef2e9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -12,11 +12,13 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.ByteBlockPool; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -145,7 +147,7 @@ public void postParse(DocumentParserContext context) throws IOException { TimeSeriesIdBuilder timeSeriesIdBuilder = (TimeSeriesIdBuilder) context.getDimensions(); BytesRef timeSeriesId = timeSeriesIdBuilder.build().toBytesRef(); context.doc().add(new SortedDocValuesField(fieldType().name(), timeSeriesId)); - TsidExtractingIdFieldMapper.createField(context, timeSeriesId); + TsidExtractingIdFieldMapper.createField(context, timeSeriesIdBuilder.routingBuilder, timeSeriesId); } @Override @@ -190,6 +192,15 @@ public static class TimeSeriesIdBuilder implements DocumentDimensions { * to build the _tsid field for the document. */ private final SortedMap dimensions = new TreeMap<>(); + /** + * Builds the routing. Used for building {@code _id}. If null then skipped. 
+ */ + @Nullable + private final IndexRouting.ExtractFromSource.Builder routingBuilder; + + public TimeSeriesIdBuilder(@Nullable IndexRouting.ExtractFromSource.Builder routingBuilder) { + this.routingBuilder = routingBuilder; + } public BytesReference build() throws IOException { if (dimensions.isEmpty()) { @@ -228,7 +239,7 @@ public void addString(String fieldName, String value) { out.write((byte) 's'); /* * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish - * so its easier for folks to reason about the space taken up. Mostly + * so it's easier for folks to reason about the space taken up. Mostly * it'll be smaller too. */ BytesRef bytes = new BytesRef(value); @@ -239,6 +250,10 @@ public void addString(String fieldName, String value) { } out.writeBytesRef(bytes); add(fieldName, out.bytes()); + + if (routingBuilder != null) { + routingBuilder.addMatching(fieldName, bytes); + } } catch (IOException e) { throw new IllegalArgumentException("Dimension field cannot be serialized.", e); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java index 0d89120cd2803..d5e74a432cc17 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java @@ -108,7 +108,7 @@ private TsidExtractingIdFieldMapper() { private static final long SEED = 0; - public static void createField(DocumentParserContext context, BytesRef tsid) { + public static void createField(DocumentParserContext context, IndexRouting.ExtractFromSource.Builder routingBuilder, BytesRef tsid) { IndexableField[] timestampFields = context.rootDoc().getFields(DataStreamTimestampFieldMapper.DEFAULT_PATH); if (timestampFields.length == 0) { throw new IllegalArgumentException( @@ -125,8 +125,15 @@ public static void createField(DocumentParserContext context, BytesRef tsid) { ByteUtils.writeLongBE(timestamp, suffix, 8); // Big Ending shrinks the inverted index by ~37% IndexRouting.ExtractFromSource indexRouting = (IndexRouting.ExtractFromSource) context.indexSettings().getIndexRouting(); - // TODO it'd be way faster to use the fields that we've extract here rather than the source or parse the tsid - String id = indexRouting.createId(context.sourceToParse().getXContentType(), context.sourceToParse().source(), suffix); + String id = routingBuilder.createId(suffix, () -> { + if (context.getDynamicMappers().isEmpty() == false) { + throw new IllegalStateException( + "Didn't find any fields to include in the routing which would be fine if there are" + + " dynamic mapping waiting but we couldn't find any of those either!" 
+ ); + } + return 0; + }); assert Uid.isURLBase64WithoutPadding(id); // Make sure we get to use Uid's nice optimizations /* * Make sure that _id from extracting the tsid matches that _id @@ -140,6 +147,9 @@ public static void createField(DocumentParserContext context, BytesRef tsid) { assert context.getDynamicMappers().isEmpty() == false || context.getDynamicRuntimeFields().isEmpty() == false || id.equals(indexRouting.createId(TimeSeriesIdFieldMapper.decodeTsid(tsid), suffix)); + assert context.getDynamicMappers().isEmpty() == false + || context.getDynamicRuntimeFields().isEmpty() == false + || id.equals(indexRouting.createId(context.sourceToParse().getXContentType(), context.sourceToParse().source(), suffix)); if (context.sourceToParse().id() != null && false == context.sourceToParse().id().equals(id)) { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 488910a9f700d..ccb5743edd427 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -704,7 +704,7 @@ public BytesRef parseBytesRef(Object value) { } Map m = (Map) value; - TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(); + TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); for (Map.Entry entry : m.entrySet()) { String f = entry.getKey().toString(); Object v = entry.getValue(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java index c40283c128486..04f865e1ef8dc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTests.java @@ -644,8 +644,16 @@ private void assertIndexShard(IndexRouting routing, Map source, IndexRouting.ExtractFromSource r = (IndexRouting.ExtractFromSource) routing; String idFromSource = r.createId(XContentType.JSON, sourceBytes, suffix); assertThat(shardIdForReadFromSourceExtracting(routing, idFromSource), equalTo(expectedShard)); - String idFromFlattened = r.createId(flatten(source), suffix); + Map flattened = flatten(source); + String idFromFlattened = r.createId(flattened, suffix); assertThat(idFromFlattened, equalTo(idFromSource)); + + IndexRouting.ExtractFromSource.Builder b = r.builder(); + for (Map.Entry e : flattened.entrySet()) { + b.addMatching(e.getKey(), new BytesRef(e.getValue().toString())); + } + String idFromBuilder = b.createId(suffix, () -> { throw new AssertionError(); }); + assertThat(idFromBuilder, equalTo(idFromSource)); } private byte[] randomSuffix() { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java index f105b77b67ad3..cebc832c057d9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/timeseries/TimeSeriesAggregatorTests.java @@ -77,7 +77,7 @@ public void testStandAloneTimeSeriesWithSum() throws IOException { public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { final List fields = new ArrayList<>(); fields.add(new 
SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - final TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(); + final TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); for (int i = 0; i < dimensions.length; i += 2) { if (dimensions[i + 1]instanceof Number n) { builder.addLong(dimensions[i].toString(), n.longValue()); From dc672b0e6588b915ca8cc530be38dbd3f5682263 Mon Sep 17 00:00:00 2001 From: Nikola Grcevski <6207777+grcevski@users.noreply.github.com> Date: Tue, 16 Aug 2022 17:18:18 -0400 Subject: [PATCH 219/265] Handle snapshot restore in file settings (#89321) --- docs/changelog/89321.yaml | 5 + .../service/FileSettingsServiceIT.java | 2 +- .../service/SnaphotsAndFileSettingsIT.java | 331 ++++++++++++++++++ .../cluster/metadata/Metadata.java | 12 +- .../metadata/ReservedStateMetadata.java | 12 + .../java/org/elasticsearch/node/Node.java | 16 +- .../ReservedClusterStateHandler.java | 2 - .../service/FileSettingsService.java | 75 +++- .../service/ReservedClusterStateService.java | 37 -- .../service/ReservedStateUpdateTask.java | 50 +++ .../snapshots/RestoreService.java | 8 +- .../service/FileSettingsServiceTests.java | 2 +- .../ReservedClusterStateServiceTests.java | 25 +- .../snapshots/SnapshotResiliencyTests.java | 4 +- 14 files changed, 505 insertions(+), 76 deletions(-) create mode 100644 docs/changelog/89321.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java diff --git a/docs/changelog/89321.yaml b/docs/changelog/89321.yaml new file mode 100644 index 0000000000000..6680e0516ab0f --- /dev/null +++ b/docs/changelog/89321.yaml @@ -0,0 +1,5 @@ +pr: 89321 +summary: Handle snapshot restore in file settings +area: Infra/Core +type: bug +issues: [89183] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index 708d9226c8d4d..4c10544e2a555 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -45,7 +45,7 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class FileSettingsServiceIT extends ESIntegTestCase { - private AtomicLong versionCounter = new AtomicLong(1); + private static AtomicLong versionCounter = new AtomicLong(1); private static String testJSON = """ { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java new file mode 100644 index 0000000000000..1ef44347cc4ff --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java @@ -0,0 +1,331 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.reservedstate.service; + +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.SnapshotState; +import org.junit.After; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests that snapshot restore behaves correctly when we have file based settings that reserve part of the + * cluster state + */ +public class SnaphotsAndFileSettingsIT extends AbstractSnapshotIntegTestCase { + private static AtomicLong versionCounter = new AtomicLong(1); + + private static String testFileSettingsJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": { + "indices.recovery.max_bytes_per_sec": "50mb" + } + } + }"""; + + private static String emptyFileSettingsJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": {} + } + }"""; + + @After + public void cleanUp() throws Exception { + awaitNoMoreRunningOperations(); + } + + private void writeJSONFile(String node, String json) throws Exception { + long version = versionCounter.incrementAndGet(); + + FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); + + Files.createDirectories(fileSettingsService.operatorSettingsDir()); + Path tempFilePath = createTempFile(); + + Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); + } + + private CountDownLatch setupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata == null) { + fail("Should've found cluster settings in this metadata"); 
+ } + if (handlerMetadata.keys().contains("indices.recovery.max_bytes_per_sec")) { + clusterService.removeListener(this); + savedClusterState.countDown(); + } + } + } + }); + + return savedClusterState; + } + + private ClusterStateResponse assertClusterStateSaveOK(CountDownLatch savedClusterState) throws Exception { + boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + return clusterAdmin().state(new ClusterStateRequest()).actionGet(); + } + + public void testRestoreWithRemovedFileSettings() throws Exception { + try { + createRepository("test-repo", "fs"); + + logger.info("--> set some persistent cluster settings"); + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) + .build() + ) + ); + + ensureGreen(); + + String masterNode = internalCluster().getMasterName(); + + var savedClusterState = setupClusterStateListener(masterNode); + FileSettingsService fs = internalCluster().getInstance(FileSettingsService.class, masterNode); + + logger.info("--> write some file based settings, putting some reserved state"); + writeJSONFile(masterNode, testFileSettingsJSON); + final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState); + assertThat( + savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("50mb") + ); + + logger.info("--> create full snapshot"); + createFullSnapshot("test-repo", "test-snap"); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) + .build() + ) + ); + + logger.info("--> deleting operator file, no file based settings"); + Files.delete(fs.operatorSettingsFile()); + + logger.info("--> restore global state from the snapshot"); + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + + ensureGreen(); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().metadata(true)).actionGet(); + + // We expect no reserved metadata state for file based settings, the operator file was deleted. + assertNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); + + final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( + ClusterGetSettingsAction.INSTANCE, + new ClusterGetSettingsAction.Request() + ).actionGet(); + + assertThat( + getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), + equalTo("25s") + ); + // We didn't remove the setting set by file settings, we simply removed the reserved (operator) section. 
+ assertThat(getSettingsResponse.persistentSettings().get("indices.recovery.max_bytes_per_sec"), equalTo("50mb")); + } finally { + // cleanup + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) + .put("indices.recovery.max_bytes_per_sec", (String) null) + .build() + ) + ); + } + } + + private CountDownLatch removedReservedClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null && reservedState.version() == 0L) { + clusterService.removeListener(this); + savedClusterState.countDown(); + } + } + }); + + return savedClusterState; + } + + private CountDownLatch cleanedClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata == null) { + fail("Should've found cluster settings in this metadata"); + } + if (handlerMetadata.keys().isEmpty()) { + clusterService.removeListener(this); + savedClusterState.countDown(); + } + } + } + }); + + return savedClusterState; + } + + public void testRestoreWithPersistedFileSettings() throws Exception { + try { + createRepository("test-repo", "fs"); + + logger.info("--> set some persistent cluster settings"); + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) + .build() + ) + ); + + ensureGreen(); + + String masterNode = internalCluster().getMasterName(); + + var savedClusterState = setupClusterStateListener(masterNode); + FileSettingsService fs = internalCluster().getInstance(FileSettingsService.class, masterNode); + + logger.info("--> write some file based settings, putting some reserved state"); + writeJSONFile(masterNode, testFileSettingsJSON); + final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState); + assertThat( + savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("50mb") + ); + + logger.info("--> create full snapshot"); + createFullSnapshot("test-repo", "test-snap"); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) + .build() + ) + ); + + logger.info("--> restore global state from the snapshot"); + var removedReservedState = 
removedReservedClusterStateListener(masterNode); + var restoredReservedState = setupClusterStateListener(masterNode); + + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + + ensureGreen(); + + // When the target cluster of a restore has an existing operator file, we don't un-reserve the reserved + // cluster state for file based settings, but instead we reset the version to 0 and 'touch' the operator file + // so that it gets re-processed. + logger.info("--> reserved state version will be reset to 0, because of snapshot restore"); + assertTrue(removedReservedState.await(20, TimeUnit.SECONDS)); + + logger.info("--> reserved state would be restored"); + assertTrue(restoredReservedState.await(20, TimeUnit.SECONDS)); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().metadata(true)).actionGet(); + + assertNotNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); + + final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( + ClusterGetSettingsAction.INSTANCE, + new ClusterGetSettingsAction.Request() + ).actionGet(); + + assertThat( + getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), + equalTo("25s") + ); + + // we need to remove the reserved state, so that clean-up can happen + var cleanupReservedState = cleanedClusterStateListener(masterNode); + + logger.info("--> clear the file based settings"); + writeJSONFile(masterNode, emptyFileSettingsJSON); + assertClusterStateSaveOK(cleanupReservedState); + } finally { + // cleanup + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) + .put("indices.recovery.max_bytes_per_sec", (String) null) + .build() + ) + ); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 3a786caf6b563..c71eb2cc685dd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1770,7 +1770,7 @@ public Builder put(Map reservedStateMetadata) { /** * Adds a {@link ReservedStateMetadata} for a given namespace to the metadata builder - * @param metadata an {@link ReservedStateMetadata} + * @param metadata a {@link ReservedStateMetadata} * @return {@link Builder} */ public Builder put(ReservedStateMetadata metadata) { @@ -1778,6 +1778,16 @@ public Builder put(ReservedStateMetadata metadata) { return this; } + /** + * Removes a {@link ReservedStateMetadata} for a given namespace + * @param metadata a {@link ReservedStateMetadata} + * @return {@link Builder} + */ + public Builder removeReservedState(ReservedStateMetadata metadata) { + reservedStateMetadata.remove(metadata.namespace()); + return this; + } + public Builder indexGraveyard(final IndexGraveyard indexGraveyard) { putCustom(IndexGraveyard.TYPE, indexGraveyard); return this; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java index e738c26fe332c..d76297ba5b858 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java @@ -214,6 +214,18 @@ public Builder(String namespace) { this.errorMetadata = null; } + /** + * Creates an reserved state metadata builder + * + * @param metadata the previous metadata + */ + public Builder(ReservedStateMetadata metadata) { + this(metadata.namespace); + this.version = metadata.version; + this.handlers = new HashMap<>(metadata.handlers); + this.errorMetadata = metadata.errorMetadata; + } + /** * Creates an reserved state metadata builder * diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index f89e3969e2e2a..d1290aa4be6c3 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -822,6 +822,13 @@ protected Node( transportService, indicesService ); + + FileSettingsService fileSettingsService = new FileSettingsService( + clusterService, + actionModule.getReservedClusterStateService(), + environment + ); + RestoreService restoreService = new RestoreService( clusterService, repositoryService, @@ -831,7 +838,8 @@ protected Node( indexMetadataVerifier, shardLimitValidator, systemIndices, - indicesService + indicesService, + fileSettingsService ); final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( settings, @@ -955,12 +963,6 @@ protected Node( ? LocalHealthMonitor.create(settings, clusterService, nodeService, threadPool) : null; - FileSettingsService fileSettingsService = new FileSettingsService( - clusterService, - actionModule.getReservedClusterStateService(), - environment - ); - modules.add(b -> { b.bind(Node.class).toInstance(this); b.bind(NodeService.class).toInstance(nodeService); diff --git a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java index 47c412e0ea2f9..aa3cd6f4cd869 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java @@ -28,8 +28,6 @@ *
    */ public interface ReservedClusterStateHandler { - String CONTENT = "content"; - /** * Unique identifier for the handler. * diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 7fcc6c8a5b9c1..35349b5ad041d 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -13,6 +13,8 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.env.Environment; @@ -27,6 +29,8 @@ import java.nio.file.WatchKey; import java.nio.file.WatchService; import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileTime; +import java.time.Instant; import java.util.concurrent.CountDownLatch; import java.util.function.Consumer; @@ -47,7 +51,7 @@ public class FileSettingsService extends AbstractLifecycleComponent implements C private static final Logger logger = LogManager.getLogger(FileSettingsService.class); private static final String SETTINGS_FILE_NAME = "settings.json"; - static final String NAMESPACE = "file_settings"; + public static final String NAMESPACE = "file_settings"; private final ClusterService clusterService; private final ReservedClusterStateService stateService; @@ -133,15 +137,67 @@ public void clusterChanged(ClusterChangedEvent event) { } private void startIfMaster(ClusterState clusterState) { - setWatching(currentNodeMaster(clusterState), initialState); + if (currentNodeMaster(clusterState)) { + startWatcher(clusterState, initialState); + } else { + stopWatcher(); + } initialState = false; } - private void setWatching(boolean watching, boolean initialState) { - if (watching) { - startWatcher(initialState); - } else { - stopWatcher(); + /** + * Used by snapshot restore service {@link org.elasticsearch.snapshots.RestoreService} to prepare the reserved + * state of the snapshot for the current cluster. + *
<p>
    + * If the current cluster where we are restoring the snapshot into has any operator file based settings, we'll + * reset the reserved state version to 0. + *
<p>
    + * If there's no file based settings file in this cluster, we'll remove all state reservations for + * file based settings from the cluster state. + * @param clusterState the cluster state before snapshot restore + * @param mdBuilder the current metadata builder for the new cluster state + */ + public void handleSnapshotRestore(ClusterState clusterState, Metadata.Builder mdBuilder) { + assert currentNodeMaster(clusterState); + + ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(NAMESPACE); + + // When we restore from a snapshot we remove the reserved cluster state for file settings, + // since we don't know the current operator configuration, e.g. file settings could be disabled + // on the target cluster. If file settings exist and the cluster state has lost it's reserved + // state for the "file_settings" namespace, we touch our file settings file to cause it to re-process the file. + if (watching() && Files.exists(operatorSettingsFile())) { + if (fileSettingsMetadata != null) { + ReservedStateMetadata withResetVersion = new ReservedStateMetadata.Builder(fileSettingsMetadata).version(0L).build(); + mdBuilder.put(withResetVersion); + } + } else if (fileSettingsMetadata != null) { + mdBuilder.removeReservedState(fileSettingsMetadata); + } + } + + /** + * 'Touches' the settings file so the file watcher will re-processes it. + *
<p>
    + * The file processing is asynchronous, the cluster state or the file must be already updated such that + * the version information in the file is newer than what's already saved as processed in the + * cluster state. + * + * For snapshot restores we first must restore the snapshot and then force a refresh, since the cluster state + * metadata version must be reset to 0 and saved in the cluster state. + */ + private void refreshExistingFileStateIfNeeded(ClusterState clusterState) { + if (watching()) { + ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(NAMESPACE); + // We check if the version was reset to 0, and force an update if a file exists. This can happen in situations + // like snapshot restores. + if (fileSettingsMetadata != null && fileSettingsMetadata.version() == 0L && Files.exists(operatorSettingsFile())) { + try { + Files.setLastModifiedTime(operatorSettingsFile(), FileTime.from(Instant.now())); + } catch (IOException e) { + logger.warn("encountered I/O error trying to update file settings timestamp", e); + } + } } } @@ -150,9 +206,10 @@ boolean watching() { return this.watchService != null; } - synchronized void startWatcher(boolean onStartup) { + synchronized void startWatcher(ClusterState clusterState, boolean onStartup) { if (watching() || active == false) { - // already watching or inactive, nothing to do + refreshExistingFileStateIfNeeded(clusterState); + return; } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 96b5d5597f4cd..3394d7b88af47 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -149,9 +148,6 @@ public void process(String namespace, ReservedStateChunk reservedStateChunk, Con ClusterState state = clusterService.state(); ReservedStateMetadata existingMetadata = state.metadata().reservedStateMetadata().get(namespace); - if (checkMetadataVersion(namespace, existingMetadata, reservedStateVersion) == false) { - return; - } clusterService.submitStateUpdateTask( "reserved cluster state [" + namespace + "]", @@ -183,39 +179,6 @@ public void onFailure(Exception e) { ); } - // package private for testing - static boolean checkMetadataVersion( - String namespace, - ReservedStateMetadata existingMetadata, - ReservedStateVersion reservedStateVersion - ) { - if (Version.CURRENT.before(reservedStateVersion.minCompatibleVersion())) { - logger.warn( - () -> format( - "Reserved cluster state version [%s] for namespace [%s] is not compatible with this Elasticsearch node", - reservedStateVersion.minCompatibleVersion(), - namespace - ) - ); - return false; - } - - if (existingMetadata != null && existingMetadata.version() >= reservedStateVersion.version()) { - logger.warn( - () -> format( - "Not updating reserved cluster state for namespace [%s], because version [%s] is less or equal" - + " to the current metadata version [%s]", - namespace, - reservedStateVersion.version(), - existingMetadata.version() - ) - ); - return false; - } - - 
return true; - } - // package private for testing static boolean isNewError(ReservedStateMetadata existingMetadata, Long newStateVersion) { return (existingMetadata == null diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index 0631aee59cf6e..df5849c3ba4bc 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -79,6 +80,10 @@ protected ClusterState execute(final ClusterState currentState) { Map reservedState = stateChunk.state(); ReservedStateVersion reservedStateVersion = stateChunk.metadata(); + if (checkMetadataVersion(namespace, existingMetadata, reservedStateVersion) == false) { + return currentState; + } + var reservedMetadataBuilder = new ReservedStateMetadata.Builder(namespace).version(reservedStateVersion.version()); List errors = new ArrayList<>(); @@ -128,4 +133,49 @@ private Set keysForHandler(ReservedStateMetadata reservedStateMetadata, return reservedStateMetadata.handlers().get(handlerName).keys(); } + + static boolean checkMetadataVersion( + String namespace, + ReservedStateMetadata existingMetadata, + ReservedStateVersion reservedStateVersion + ) { + if (Version.CURRENT.before(reservedStateVersion.minCompatibleVersion())) { + logger.warn( + () -> format( + "Reserved cluster state version [%s] for namespace [%s] is not compatible with this Elasticsearch node", + reservedStateVersion.minCompatibleVersion(), + namespace + ) + ); + return false; + } + + // Version 0 is special, snapshot restores will reset to 0. 
+ if (reservedStateVersion.version() <= 0L) { + logger.warn( + () -> format( + "Not updating reserved cluster state for namespace [%s], because version [%s] is less or equal to 0", + namespace, + reservedStateVersion.version(), + existingMetadata.version() + ) + ); + return false; + } + + if (existingMetadata != null && existingMetadata.version() >= reservedStateVersion.version()) { + logger.warn( + () -> format( + "Not updating reserved cluster state for namespace [%s], because version [%s] is less or equal" + + " to the current metadata version [%s]", + namespace, + reservedStateVersion.version(), + existingMetadata.version() + ) + ); + return false; + } + + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 512fe1766133f..bb0587b756ca4 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -77,6 +77,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.reservedstate.service.FileSettingsService; import java.io.IOException; import java.util.ArrayList; @@ -183,6 +184,8 @@ public class RestoreService implements ClusterStateApplier { private final IndicesService indicesService; + private final FileSettingsService fileSettingsService; + private volatile boolean refreshRepositoryUuidOnRestore; public RestoreService( @@ -194,7 +197,8 @@ public RestoreService( IndexMetadataVerifier indexMetadataVerifier, ShardLimitValidator shardLimitValidator, SystemIndices systemIndices, - IndicesService indicesService + IndicesService indicesService, + FileSettingsService fileSettingsService ) { this.clusterService = clusterService; this.repositoriesService = repositoriesService; @@ -209,6 +213,7 @@ public RestoreService( this.shardLimitValidator = shardLimitValidator; this.systemIndices = systemIndices; this.indicesService = indicesService; + this.fileSettingsService = fileSettingsService; this.refreshRepositoryUuidOnRestore = REFRESH_REPO_UUID_ON_RESTORE_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(REFRESH_REPO_UUID_ON_RESTORE_SETTING, this::setRefreshRepositoryUuidOnRestore); @@ -1389,6 +1394,7 @@ && isSystemIndex(snapshotIndexMetadata) == false) { // Restore global state if needed if (request.includeGlobalState()) { applyGlobalStateRestore(currentState, mdBuilder); + fileSettingsService.handleSnapshotRestore(currentState, mdBuilder); } if (completed(shards)) { diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 9db5bba768a3b..86d3bf76228a7 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -210,7 +210,7 @@ public void testInitialFile() throws Exception { }).when(stateService).process(any(), (XContentParser) any(), any()); service.start(); - service.startWatcher(true); + service.startWatcher(clusterService.state(), true); verify(service, times(1)).processFileSettings(any(), any()); diff --git 
a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index 2734e693e5773..2bece25b6d460 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -42,6 +42,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.elasticsearch.reservedstate.service.ReservedStateUpdateTask.checkMetadataVersion; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -301,7 +302,7 @@ public Map fromXContent(XContentParser parser) throws IOExceptio ReservedStateHandlerMetadata hmOne = new ReservedStateHandlerMetadata("one", Set.of("a", "b")); ReservedStateErrorMetadata emOne = new ReservedStateErrorMetadata( - 1L, + 2L, ReservedStateErrorMetadata.ErrorKind.VALIDATION, List.of("Test error 1", "Test error 2") ); @@ -315,17 +316,17 @@ public Map fromXContent(XContentParser parser) throws IOExceptio Metadata metadata = Metadata.builder().put(operatorMetadata).build(); ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); + assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 2L)); assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 1L)); - assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 0L)); - assertTrue(ReservedClusterStateService.isNewError(operatorMetadata, 2L)); - assertTrue(ReservedClusterStateService.isNewError(null, 0L)); + assertTrue(ReservedClusterStateService.isNewError(operatorMetadata, 3L)); + assertTrue(ReservedClusterStateService.isNewError(null, 1L)); // We submit a task with two handler, one will cause an exception, the other will create a new state. // When we fail to update the metadata because of version, we ensure that the returned state is equal to the // original state by pointer reference to avoid cluster state update task to run. 
ReservedStateUpdateTask task = new ReservedStateUpdateTask( "namespace_one", - new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(1L, Version.CURRENT)), + new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(2L, Version.CURRENT)), Map.of(exceptionThrower.name(), exceptionThrower, newStateMaker.name(), newStateMaker), List.of(exceptionThrower.name(), newStateMaker.name()), (errorState) -> { assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, errorState.version())); }, @@ -370,20 +371,12 @@ public void onFailure(Exception e) {} public void testCheckMetadataVersion() { ReservedStateMetadata operatorMetadata = ReservedStateMetadata.builder("test").version(123L).build(); - assertTrue( - ReservedClusterStateService.checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.CURRENT)) - ); + assertTrue(checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.CURRENT))); - assertFalse( - ReservedClusterStateService.checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(123L, Version.CURRENT)) - ); + assertFalse(checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(123L, Version.CURRENT))); assertFalse( - ReservedClusterStateService.checkMetadataVersion( - "operator", - operatorMetadata, - new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1)) - ) + checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))) ); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 838e383d2457e..118ea581910a7 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -166,6 +166,7 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; @@ -1932,7 +1933,8 @@ protected void assertSnapshotOrGenericThread() { new IndexMetadataVerifier(settings, namedXContentRegistry, mapperRegistry, indexScopedSettings, ScriptCompiler.NONE), shardLimitValidator, EmptySystemIndices.INSTANCE, - indicesService + indicesService, + mock(FileSettingsService.class) ); actions.put( PutMappingAction.INSTANCE, From acf9a674808c353516bab3e2b2b17f8daf6ae3ce Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 16 Aug 2022 15:28:32 -0700 Subject: [PATCH 220/265] Document kNN with aggregations (#89359) This commit adds a short note to the 'search your data' docs around kNN search to explain how approximate kNN works with aggregations: * Make section on 'hybrid retrieval' more general and include aggregations info * Remove an example response from the previous section on filtering, since this page was getting long --- .../search-your-data/knn-search.asciidoc | 40 +++---------------- 1 file changed, 6 insertions(+), 34 deletions(-) diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index c16fbff4cff41..91805a4b7e40e 
100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -279,41 +279,8 @@ POST image-index/_search ---- // TEST[continued] -[source,console-result] ----- -{ - "took": 5, - "timed_out": false, - "_shards": { - "total": 1, - "successful": 1, - "skipped": 0, - "failed": 0 - }, - "hits": { - "total": { - "value": 1, - "relation": "eq" - }, - "max_score": 0.003144654, - "hits": [ - { - "_index": "image-index", - "_id": "2", - "_score": 0.003144654, - "fields": { - "title": ["alpine lake"] - } - } - ] - } -} ----- -// TESTRESPONSE[s/"took": 5/"took": $body.took/] -// TESTRESPONSE[s/,\n \.\.\.//] - [discrete] -==== Combine approximate kNN and a query +==== Combine approximate kNN with other features You can perform 'hybrid retrieval' by providing both the <> and a <>: @@ -354,6 +321,11 @@ each score in the sum. In the example above, the scores will be calculated as score = 0.9 * match_score + 0.1 * knn_score ``` +The `knn` option can also be used with <>. In general, {es} computes aggregations +over all documents that match the search. So for approximate kNN search, aggregations are calculated on the top `k` +nearest documents. If the search also includes a `query`, then aggregations are calculated on the combined set of `knn` +and `query` matches. + [discrete] [[knn-indexing-considerations]] ==== Indexing considerations From 2841bf7646a90dc6835d963ae072f7bc637b1a02 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 17 Aug 2022 11:04:41 +1000 Subject: [PATCH 221/265] YAML tests and docs for viewing API key role descriptors (#89186) This PR expands existing YAML tests and docs for the new role_descriptors field returned in both Get and Query API key calls. Relates: #89166, #89058 --- .../rest-api/security/get-api-keys.asciidoc | 38 +- .../rest-api/security/query-api-key.asciidoc | 39 +- .../test/api_key/40_view_role_descriptors.yml | 332 ++++++++++++++++++ 3 files changed, 404 insertions(+), 5 deletions(-) create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/40_view_role_descriptors.yml diff --git a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc index a1a7fe0097d3d..8f03bd01ede88 100644 --- a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc @@ -15,7 +15,11 @@ Retrieves information for one or more API keys. [[security-api-get-api-key-prereqs]] ==== {api-prereq-title} -* To use this API, you must have at least the `manage_api_key` cluster privilege. +* To use this API, you must have at least the `manage_own_api_key` cluster privilege. +* If you have only the `manage_own_api_key` privilege, this API returns only +the API keys that you own. If you have the `manage_api_key` or greater +privileges (including `manage_security`), this API returns all API keys +regardless of ownership. 
[[security-api-get-api-key-desc]] ==== {api-description-title} @@ -190,6 +194,30 @@ A successful call returns a JSON structure that contains the information of one "realm": "native1", <8> "metadata": { <9> "application": "myapp" + }, + "role_descriptors": { <10> + "role-a": { + "cluster": [ + "monitor" + ], + "indices": [ + { + "names": [ + "index-a" + ], + "privileges": [ + "read" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } } }, { @@ -199,7 +227,8 @@ A successful call returns a JSON structure that contains the information of one "invalidated": false, "username": "user-y", "realm": "realm-2", - "metadata": {} + "metadata": {}, + "role_descriptors": { } <11> } ] } @@ -216,3 +245,8 @@ a value of `true`. Otherwise, it is `false`. <7> Principal for which this API key was created <8> Realm name of the principal for which this API key was created <9> Metadata of the API key +<10> The role descriptors assigned to this API key when it was <> +or last <>. The API key's +effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of +the owner user's permissions. +<11> An empty role descriptor means the API key inherits the owner user's permissions. diff --git a/x-pack/docs/en/rest-api/security/query-api-key.asciidoc b/x-pack/docs/en/rest-api/security/query-api-key.asciidoc index 94980066db53d..42c4bd83c8fef 100644 --- a/x-pack/docs/en/rest-api/security/query-api-key.asciidoc +++ b/x-pack/docs/en/rest-api/security/query-api-key.asciidoc @@ -46,8 +46,7 @@ The query supports a subset of query types, including <>, <>, <>, and <>. + -You can query all public information associated with an API key, including the -following values. +You can query the following public values associated with an API key. + .Valid values for `query` [%collapsible%open] @@ -78,6 +77,8 @@ Realm name of the API key owner. Metadata field associated with the API key, such as `metadata.my_field`. Because metadata is stored as a <> field type, all fields act like `keyword` fields when querying and sorting. + +NOTE: You cannot query the role descriptors of an API key. ==== include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from] @@ -146,6 +147,30 @@ retrieved from one or more API keys: "realm": "reserved", "metadata": { "letter": "a" + }, + "role_descriptors": { <2> + "role-a": { + "cluster": [ + "monitor" + ], + "indices": [ + { + "names": [ + "index-a" + ], + "privileges": [ + "read" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } } }, { @@ -158,7 +183,8 @@ retrieved from one or more API keys: "realm": "reserved", "metadata": { "letter": "b" - } + }, + "role_descriptors": { } <3> } ] } @@ -166,6 +192,11 @@ retrieved from one or more API keys: // NOTCONSOLE <1> The list of API keys that were retrieved for this request +<2> The role descriptors that are assigned to this API key when it was <> +or last <>. Note the API key's +effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of +the owner user's permissions. +<3> An empty role descriptors means the API key inherits the owner user's permissions. 
If you create an API key with the following details: @@ -308,6 +339,7 @@ The response contains a list of matched API keys along with their sort values: "metadata": { "environment": "production" }, + "role_descriptors": { }, "_sort": [ "2021-08-18T01:29:14.811Z", <1> "app1-key-79" <2> @@ -323,6 +355,7 @@ The response contains a list of matched API keys along with their sort values: "metadata": { "environment": "production" }, + "role_descriptors": { }, "_sort": [ "2021-08-18T01:29:13.794Z", "app1-key-78" diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/40_view_role_descriptors.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/40_view_role_descriptors.yml new file mode 100644 index 0000000000000..6be181a9dfd5b --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/40_view_role_descriptors.yml @@ -0,0 +1,332 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + security.put_role: + name: "admin_role" + body: > + { + "cluster": ["manage_api_key"], + "indices": [ + { + "names": "*", + "privileges": ["all"] + } + ], + "applications": [ + { + "application": "myapp", + "privileges": ["*"], + "resources": ["*"] + } + ] + } + + - do: + security.put_user: + username: "api_key_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "admin_role" ], + "full_name" : "API key user" + } + + + +--- +teardown: + - do: + security.delete_role: + name: "admin_role" + ignore: 404 + + - do: + security.delete_user: + username: "api_key_user" + ignore: 404 + +--- +"Test API key role descriptors in Get and Query responses": + - skip: + features: transform_and_set + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.create_api_key: + body: > + { + "name": "key-0-with-implicit-inherit" + } + - match: { name: "key-0-with-implicit-inherit" } + - is_true: id + - set: { id: api_key_id_0 } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.get_api_key: + id: "$api_key_id_0" + - match: { "api_keys.0.id": "$api_key_id_0" } + - match: { "api_keys.0.role_descriptors": { } } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.create_api_key: + body: > + { + "name": "key-1-with-explicit-inherit", + "role_descriptors": {} + } + - match: { name: "key-1-with-explicit-inherit" } + - is_true: id + - set: { id: api_key_id_1 } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.get_api_key: + id: "$api_key_id_1" + - match: { "api_keys.0.id": "$api_key_id_1" } + - match: { "api_keys.0.role_descriptors": { } } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.create_api_key: + body: > + { + "name": "key-2-with-single-assigned-role-descriptor", + "role_descriptors": { + "role-a": { + "cluster": ["all"], + "index": [ + { + "names": ["index-a"], + "privileges": ["read"] + } + ] + } + } + } + - match: { name: "key-2-with-single-assigned-role-descriptor" } + - is_true: id + - set: { id: api_key_id_2 } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.get_api_key: + id: "$api_key_id_2" + - match: { "api_keys.0.id": "$api_key_id_2" } + - match: { "api_keys.0.role_descriptors": { + "role-a": { + 
"cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index-a" + ], + "privileges": [ + "read" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } + } + } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.create_api_key: + body: > + { + "name": "key-3-with-multiple-assigned-role-descriptors", + "expiration": "1d", + "role_descriptors": { + "role-a": { + "cluster": ["all"], + "index": [ + { + "names": ["index-a"], + "privileges": ["read"] + } + ] + }, + "role-b": { + "cluster": ["manage"], + "index": [ + { + "names": ["index-b"], + "privileges": ["all"] + } + ] + } + } + } + - match: { name: "key-3-with-multiple-assigned-role-descriptors" } + - is_true: id + - set: { id: api_key_id_3 } + + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.get_api_key: + id: "$api_key_id_3" + - match: { "api_keys.0.id": "$api_key_id_3" } + - match: { "api_keys.0.role_descriptors": { + "role-a": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index-a" + ], + "privileges": [ + "read" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + }, + "role-b": { + "cluster": [ + "manage" + ], + "indices": [ + { + "names": [ + "index-b" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } + } + } + + # Query API keys + - do: + headers: + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user + security.query_api_keys: + body: > + { + "sort": [ "name" ] + } + - match: { total: 4 } + - match: { count: 4 } + - match: { "api_keys.0.name": "key-0-with-implicit-inherit" } + - match: { "api_keys.0.role_descriptors": { } } + - match: { "api_keys.1.name": "key-1-with-explicit-inherit" } + - match: { "api_keys.1.role_descriptors": { } } + - match: { "api_keys.2.name": "key-2-with-single-assigned-role-descriptor"} + - match: { "api_keys.2.role_descriptors": { + "role-a": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index-a" + ], + "privileges": [ + "read" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } + } + } + - match: { "api_keys.3.name": "key-3-with-multiple-assigned-role-descriptors"} + - match: { "api_keys.3.role_descriptors": { + "role-a": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index-a" + ], + "privileges": [ + "read" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + }, + "role-b": { + "cluster": [ + "manage" + ], + "indices": [ + { + "names": [ + "index-b" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } + } + } + From c4dfc66ea24aabc2cbe45cb668a5f2648ec1917a Mon Sep 17 00:00:00 2001 From: Jun Ohtani Date: Wed, 17 Aug 2022 11:04:11 +0900 Subject: [PATCH 222/265] [DOCS] Use CJKWidthCharFilter in JapaneseAnalyzer (#89364) After Lucene 9.0, JapaneseAnalyzer uses CJKWidthCharFilter instead of 
CJKWidthFilter. See details https://issues.apache.org/jira/browse/LUCENE-9853 Co-authored-by: Julie Tibshirani --- docs/plugins/analysis-kuromoji.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 75bd6cc446d0d..9759b7fdd21f5 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -10,12 +10,12 @@ include::install_remove.asciidoc[] [[analysis-kuromoji-analyzer]] ==== `kuromoji` analyzer -The `kuromoji` analyzer consists of the following tokenizer and token filters: +The `kuromoji` analyzer uses the following analysis chain: +* `CJKWidthCharFilter` from Lucene * <> * <> token filter * <> token filter -* {ref}/analysis-cjk-width-tokenfilter.html[`cjk_width`] token filter * <> token filter * <> token filter * {ref}/analysis-lowercase-tokenfilter.html[`lowercase`] token filter From 03f3c8119ac1e7bb7034be1f7c4f3355fec4b300 Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> Date: Wed, 17 Aug 2022 08:45:22 +0300 Subject: [PATCH 223/265] Downsampling: copy`index.hidden` setting from source (#89177) Rollup indices are initially created as hidden (index.hidden: true). At the end of the rollup process, we must set this setting to the value the source index has --- .../rollup/v2/TransportRollupAction.java | 10 +++- .../v2/RollupActionSingleNodeTests.java | 53 +++++++++++-------- 2 files changed, 40 insertions(+), 23 deletions(-) diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java index c6f95df65d669..e70a99e47bd2f 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java @@ -267,6 +267,14 @@ protected void masterOperation( if (sourceIndexMetadata.getNumberOfReplicas() > 0) { settings.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetadata.getNumberOfReplicas()); } + // Setting index.hidden has been initially set to true. We revert this to the value of the source index + if (sourceIndexMetadata.isHidden() == false) { + if (sourceIndexMetadata.getSettings().keySet().contains(IndexMetadata.SETTING_INDEX_HIDDEN)) { + settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, false); + } else { + settings.putNull(IndexMetadata.SETTING_INDEX_HIDDEN); + } + } UpdateSettingsRequest updateSettingsReq = new UpdateSettingsRequest(settings.build(), rollupIndexName); updateSettingsReq.setParentTask(parentTask); client.admin().indices().updateSettings(updateSettingsReq, ActionListener.wrap(updateSettingsResponse -> { @@ -545,7 +553,7 @@ private void createRollupIndex( * case rollup will fail. 
*/ Settings.builder() - .put(IndexMetadata.INDEX_HIDDEN_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, sourceIndexMetadata.getNumberOfShards()) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java index 3bffb800a6801..55e21d1c4c282 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java @@ -165,23 +165,25 @@ public void setup() { * check that the value of the label (last value) matches the value * of the corresponding metric which uses a last_value metric type. */ + Settings.Builder settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) + .put( + IndexSettings.TIME_SERIES_START_TIME.getKey(), + DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) + ) + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z"); + + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean()); + } assertAcked( client().admin() .indices() .prepareCreate(sourceIndex) - .setSettings( - Settings.builder() - .put("index.number_of_shards", numOfShards) - .put("index.number_of_replicas", numOfReplicas) - .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) - .put( - IndexSettings.TIME_SERIES_START_TIME.getKey(), - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) - ) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z") - .build() - ) + .setSettings(settings.build()) .setMapping( FIELD_TIMESTAMP, "type=date", @@ -769,7 +771,10 @@ private void assertRollupIndexSettings(String sourceIndex, String rollupIndex, G ); assertEquals(sourceIndex, indexSettingsResp.getSetting(rollupIndex, IndexMetadata.INDEX_ROLLUP_SOURCE_NAME_KEY)); - assertEquals(indexSettingsResp.getSetting(sourceIndex, "index.mode"), indexSettingsResp.getSetting(rollupIndex, "index.mode")); + assertEquals( + indexSettingsResp.getSetting(sourceIndex, IndexSettings.MODE.getKey()), + indexSettingsResp.getSetting(rollupIndex, IndexSettings.MODE.getKey()) + ); assertNotNull(indexSettingsResp.getSetting(sourceIndex, IndexSettings.TIME_SERIES_START_TIME.getKey())); assertNotNull(indexSettingsResp.getSetting(rollupIndex, IndexSettings.TIME_SERIES_START_TIME.getKey())); @@ -784,11 +789,11 @@ private void assertRollupIndexSettings(String sourceIndex, String rollupIndex, G indexSettingsResp.getSetting(sourceIndex, IndexSettings.TIME_SERIES_END_TIME.getKey()), indexSettingsResp.getSetting(rollupIndex, IndexSettings.TIME_SERIES_END_TIME.getKey()) ); - assertNotNull(indexSettingsResp.getSetting(sourceIndex, "index.routing_path")); - assertNotNull(indexSettingsResp.getSetting(rollupIndex, "index.routing_path")); + 
assertNotNull(indexSettingsResp.getSetting(sourceIndex, IndexMetadata.INDEX_ROUTING_PATH.getKey())); + assertNotNull(indexSettingsResp.getSetting(rollupIndex, IndexMetadata.INDEX_ROUTING_PATH.getKey())); assertEquals( - indexSettingsResp.getSetting(sourceIndex, "index.routing_path"), - indexSettingsResp.getSetting(rollupIndex, "index.routing_path") + indexSettingsResp.getSetting(sourceIndex, IndexMetadata.INDEX_ROUTING_PATH.getKey()), + indexSettingsResp.getSetting(rollupIndex, IndexMetadata.INDEX_ROUTING_PATH.getKey()) ); assertNotNull(indexSettingsResp.getSetting(sourceIndex, IndexMetadata.SETTING_NUMBER_OF_SHARDS)); @@ -804,7 +809,11 @@ private void assertRollupIndexSettings(String sourceIndex, String rollupIndex, G indexSettingsResp.getSetting(sourceIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS), indexSettingsResp.getSetting(rollupIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS) ); - assertEquals("true", indexSettingsResp.getSetting(rollupIndex, "index.blocks.write")); + assertEquals("true", indexSettingsResp.getSetting(rollupIndex, IndexMetadata.SETTING_BLOCKS_WRITE)); + assertEquals( + indexSettingsResp.getSetting(sourceIndex, IndexMetadata.SETTING_INDEX_HIDDEN), + indexSettingsResp.getSetting(rollupIndex, IndexMetadata.SETTING_INDEX_HIDDEN) + ); } private AggregationBuilder buildAggregations( @@ -869,9 +878,9 @@ private String createDataStream() throws Exception { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.getDefault()); Template indexTemplate = new Template( Settings.builder() - .put("index.number_of_shards", numOfShards) - .put("index.number_of_replicas", numOfReplicas) - .put("index.mode", "time_series") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) .build(), new CompressedXContent(""" From f2257cae89c872664fd89c52789d71de8d8c106c Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Wed, 17 Aug 2022 09:40:30 +0200 Subject: [PATCH 224/265] [DOCS] Adds note about escaping backslashes in regex (#89276) * [DOCS] Adds note about escaping backslashes in regex * Fix typo * Simplify example --- .../query-dsl/regexp-syntax.asciidoc | 33 ++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/regexp-syntax.asciidoc b/docs/reference/query-dsl/regexp-syntax.asciidoc index 270f6fe79e662..28c9c882542c1 100644 --- a/docs/reference/query-dsl/regexp-syntax.asciidoc +++ b/docs/reference/query-dsl/regexp-syntax.asciidoc @@ -37,7 +37,38 @@ backslash or surround it with double quotes. For example: \\ # renders as a literal '\' "john@smith.com" # renders as 'john@smith.com' .... - + +[NOTE] +==== + +The backslash is an escape character in both JSON strings and regular +expressions. You need to escape both backslashes in a query, unless you use a +language client, which takes care of this. 
For example, the string `a\b` needs +to be indexed as `"a\\b"`: + +[source,console] +-------------------------------------------------- +PUT my-index-000001/_doc/1 +{ + "my_field": "a\\b" +} +-------------------------------------------------- + +This document matches the following `regexp` query: + +[source,console] +-------------------------------------------------- +GET my-index-000001/_search +{ + "query": { + "regexp": { + "my_field.keyword": "a\\\\.*" + } + } +} +-------------------------------------------------- +//TEST[continued] +==== [discrete] [[regexp-standard-operators]] From af8ac507889dfe16aa4cf82130ce58303eed214b Mon Sep 17 00:00:00 2001 From: Anthony McGlone <102866938+anthonymcglone2022@users.noreply.github.com> Date: Wed, 17 Aug 2022 08:53:14 +0100 Subject: [PATCH 225/265] [DOCS] Update search_after section with an example (#89328) * [DOCS] Update search_after section with an example * Update docs/reference/search/search-your-data/paginate-search-results.asciidoc Co-authored-by: Abdon Pijpelink * Update docs/reference/search/search-your-data/paginate-search-results.asciidoc Co-authored-by: Abdon Pijpelink * Update docs/reference/search/search-your-data/paginate-search-results.asciidoc Co-authored-by: Abdon Pijpelink Co-authored-by: Abdon Pijpelink --- .../paginate-search-results.asciidoc | 48 ++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc index 931e6cffc6675..4fd39efce8a05 100644 --- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc +++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc @@ -46,7 +46,53 @@ You can use the `search_after` parameter to retrieve the next page of hits using a set of <> from the previous page. Using `search_after` requires multiple search requests with the same `query` and -`sort` values. If a <> occurs between these requests, +`sort` values. The first step is to run an initial request. The following +example sorts the results by two fields (`date` and `tie_breaker_id`): +[source,js] +-------------------------------------------------- +GET twitter/_search +{ + "query": { + "match" : { + "title" : "elasticsearch" + } + }, + "sort": [ + {"date": "asc"}, + {"tie_breaker_id": "asc"} <1> + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/"tie_breaker_id": "asc"/"tie_breaker_id": {"unmapped_type": "keyword"}/] + +<1> A copy of the `_id` field with `doc_values` enabled + +The search response includes an array of `sort` values for each hit. To retrieve +the next page of results, repeat the request, take the `sort` values from the +last hit, and insert those into the `search_after` array: +[source,js] +-------------------------------------------------- +GET twitter/_search +{ + "query": { + "match" : + "title" : "elasticsearch" + } + }, + "search_after": [1463538857, "654323"], + "sort": [ + {"date": "asc"}, + {"tie_breaker_id": "asc"} + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/"tie_breaker_id": "asc"/"tie_breaker_id": {"unmapped_type": "keyword"}/] +Repeat this process by updating the `search_after` array every time you retrieve a +new page of results. If a <> occurs between these requests, the order of your results may change, causing inconsistent results across pages. 
To prevent this, you can create a <> to preserve the current index state over your searches. From 2c37c596d0337e0641c538119a610ca780e8e27f Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 17 Aug 2022 10:17:29 +0200 Subject: [PATCH 226/265] Allocation commands related refactoring (#89400) This change includes: * moving resetFailedAllocationCounter to a common place in RoutingNodes so that it is accessible from multiple allocation service implementation * splitting ClusterRerouteTests#testClusterStateUpdateTask into 2 distinct test scenarios to avoid reusing the task --- .../cluster/routing/RoutingNodes.java | 24 ++++ .../routing/allocation/AllocationService.java | 29 +---- .../cluster/reroute/ClusterRerouteTests.java | 123 +++++++++--------- 3 files changed, 89 insertions(+), 87 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index e14cd918a2bff..d20f3e75bcf90 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -985,6 +985,30 @@ public void ignoreShard(ShardRouting shard, AllocationStatus allocationStatus, R ignored.add(shard); } + public void resetFailedAllocationCounter(RoutingChangesObserver routingChangesObserver) { + final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = iterator(); + while (unassignedIterator.hasNext()) { + ShardRouting shardRouting = unassignedIterator.next(); + UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + unassignedIterator.updateUnassigned( + new UnassignedInfo( + unassignedInfo.getNumFailedAllocations() > 0 ? UnassignedInfo.Reason.MANUAL_ALLOCATION : unassignedInfo.getReason(), + unassignedInfo.getMessage(), + unassignedInfo.getFailure(), + 0, + unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), + unassignedInfo.isDelayed(), + unassignedInfo.getLastAllocationStatus(), + Collections.emptySet(), + unassignedInfo.getLastAllocatedNodeId() + ), + shardRouting.recoverySource(), + routingChangesObserver + ); + } + } + public class UnassignedIterator implements Iterator, ExistingShardsAllocator.UnassignedAllocationHandler { private final ListIterator iterator; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 7d97d1ea651ea..28d1ab4877be3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -352,33 +352,6 @@ private void removeDelayMarkers(RoutingAllocation allocation) { } } - /** - * Reset failed allocation counter for unassigned shards - */ - private void resetFailedAllocationCounter(RoutingAllocation allocation) { - final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator(); - while (unassignedIterator.hasNext()) { - ShardRouting shardRouting = unassignedIterator.next(); - UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); - unassignedIterator.updateUnassigned( - new UnassignedInfo( - unassignedInfo.getNumFailedAllocations() > 0 ? 
UnassignedInfo.Reason.MANUAL_ALLOCATION : unassignedInfo.getReason(), - unassignedInfo.getMessage(), - unassignedInfo.getFailure(), - 0, - unassignedInfo.getUnassignedTimeInNanos(), - unassignedInfo.getUnassignedTimeInMillis(), - unassignedInfo.isDelayed(), - unassignedInfo.getLastAllocationStatus(), - Collections.emptySet(), - unassignedInfo.getLastAllocatedNodeId() - ), - shardRouting.recoverySource(), - allocation.changes() - ); - } - } - /** * Internal helper to cap the number of elements in a potentially long list for logging. * @@ -414,7 +387,7 @@ public CommandsResult reroute(final ClusterState clusterState, AllocationCommand allocation.ignoreDisable(true); if (retryFailed) { - resetFailedAllocationCounter(allocation); + allocation.routingNodes().unassigned().resetFailedAllocationCounter(allocation.changes()); } RoutingExplanations explanations = commands.execute(allocation, explain); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java index 627052aeea62b..50b2ef14fade8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -34,7 +36,6 @@ import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReference; @@ -67,92 +68,96 @@ public void testSerializeRequest() throws IOException { assertEquals(req.getCommands().commands().size(), deserializedReq.getCommands().commands().size()); } - public void testClusterStateUpdateTask() { + public void testClusterStateUpdateTaskInDryRun() { AllocationService allocationService = new AllocationService( - new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new AllocationDeciders(List.of(new MaxRetryAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE, EmptySnapshotsInfoService.INSTANCE ); ClusterState clusterState = createInitialClusterState(allocationService); - ClusterRerouteRequest req = new ClusterRerouteRequest(); - req.dryRun(true); - AtomicReference responseRef = new AtomicReference<>(); - ActionListener responseActionListener = new ActionListener() { - @Override - public void onResponse(ClusterRerouteResponse clusterRerouteResponse) { - responseRef.set(clusterRerouteResponse); - } - - @Override - public void onFailure(Exception e) { - - } - }; - TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask task = - new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask( - logger, - allocationService, - req, - responseActionListener - ); + + var responseRef = new AtomicReference(); + var 
responseActionListener = ActionListener.wrap( + responseRef::set, + exception -> { throw new AssertionError("Should not fail in test", exception); } + ); + + var request = new ClusterRerouteRequest().dryRun(true); + var task = new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask( + logger, + allocationService, + request, + responseActionListener + ); + ClusterState execute = task.execute(clusterState); - assertSame(execute, clusterState); // dry-run + assertSame(execute, clusterState); // dry-run should keep the current cluster state task.onAllNodesAcked(); assertNotSame(responseRef.get().getState(), execute); + } - req.dryRun(false);// now we allocate + public void testClusterStateUpdateTask() { + AllocationService allocationService = new AllocationService( + new AllocationDeciders(List.of(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + ClusterState clusterState = createInitialClusterState(allocationService); + + var req = new ClusterRerouteRequest().dryRun(false); + var task = new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask( + logger, + allocationService, + req, + ActionListener.noop() + ); final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); // now fail it N-1 times for (int i = 0; i < retries; i++) { + // execute task ClusterState newState = task.execute(clusterState); - assertNotSame(newState, clusterState); // dry-run=false + assertNotSame(newState, clusterState); + assertStateAndFailedAllocations(newState.routingTable().index("idx"), INITIALIZING, i); clusterState = newState; - RoutingTable routingTable = clusterState.routingTable(); - assertEquals(routingTable.index("idx").size(), 1); - assertEquals(routingTable.index("idx").shard(0).shard(0).state(), INITIALIZING); - assertEquals(routingTable.index("idx").shard(0).shard(0).unassignedInfo().getNumFailedAllocations(), i); - List failedShards = Collections.singletonList( - new FailedShard( - routingTable.index("idx").shard(0).shard(0), - "boom" + i, - new UnsupportedOperationException(), - randomBoolean() - ) + + // apply failed shards + newState = allocationService.applyFailedShards( + clusterState, + List.of( + new FailedShard( + clusterState.routingTable().index("idx").shard(0).shard(0), + "boom" + i, + new RuntimeException("test-failure"), + randomBoolean() + ) + ), + List.of() ); - newState = allocationService.applyFailedShards(clusterState, failedShards, List.of()); assertThat(newState, not(equalTo(clusterState))); + assertStateAndFailedAllocations(newState.routingTable().index("idx"), i == retries - 1 ? 
UNASSIGNED : INITIALIZING, i + 1); clusterState = newState; - routingTable = clusterState.routingTable(); - assertEquals(routingTable.index("idx").size(), 1); - if (i == retries - 1) { - assertEquals(routingTable.index("idx").shard(0).shard(0).state(), UNASSIGNED); - } else { - assertEquals(routingTable.index("idx").shard(0).shard(0).state(), INITIALIZING); - } - assertEquals(routingTable.index("idx").shard(0).shard(0).unassignedInfo().getNumFailedAllocations(), i + 1); } // without retry_failed we won't allocate that shard ClusterState newState = task.execute(clusterState); - assertNotSame(newState, clusterState); // dry-run=false - task.onAllNodesAcked(); - assertSame(responseRef.get().getState(), newState); - RoutingTable routingTable = clusterState.routingTable(); - assertEquals(routingTable.index("idx").size(), 1); - assertEquals(routingTable.index("idx").shard(0).shard(0).state(), UNASSIGNED); - assertEquals(routingTable.index("idx").shard(0).shard(0).unassignedInfo().getNumFailedAllocations(), retries); + assertNotSame(newState, clusterState); + assertStateAndFailedAllocations(clusterState.routingTable().index("idx"), UNASSIGNED, retries); req.setRetryFailed(true); // now we manually retry and get the shard back into initializing newState = task.execute(clusterState); assertNotSame(newState, clusterState); // dry-run=false - clusterState = newState; - routingTable = clusterState.routingTable(); - assertEquals(1, routingTable.index("idx").size()); - assertEquals(INITIALIZING, routingTable.index("idx").shard(0).shard(0).state()); - assertEquals(0, routingTable.index("idx").shard(0).shard(0).unassignedInfo().getNumFailedAllocations()); + assertStateAndFailedAllocations(newState.routingTable().index("idx"), INITIALIZING, 0); + } + + private void assertStateAndFailedAllocations(IndexRoutingTable indexRoutingTable, ShardRoutingState state, int failedAllocations) { + assertThat(indexRoutingTable.size(), equalTo(1)); + assertThat(indexRoutingTable.shard(0).shard(0).state(), equalTo(state)); + assertThat(indexRoutingTable.shard(0).shard(0).unassignedInfo().getNumFailedAllocations(), equalTo(failedAllocations)); } private ClusterState createInitialClusterState(AllocationService service) { From 59c745c25b2cd5da7ab5bb0e08bd326782ff1e4f Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 17 Aug 2022 10:28:57 +0200 Subject: [PATCH 227/265] [DOCS] Bulk update API keys API (#89215) API docs for bulk update API keys API. Relates: #88758 --- .../security/bulk-update-api-keys.asciidoc | 364 +++++++++++++++++- .../rest-api/security/update-api-key.asciidoc | 12 +- 2 files changed, 370 insertions(+), 6 deletions(-) diff --git a/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc index aaef85677f0ab..698528715b407 100644 --- a/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc @@ -2,4 +2,366 @@ [[security-api-bulk-update-api-keys]] === Bulk update API keys API -coming::[8.5.0] +++++ +Bulk update API keys +++++ + +[[security-api-bulk-update-api-keys-request]] +==== {api-request-title} + +`POST /_security/api_key/_bulk_update` + +[[security-api-bulk-update-api-keys-prereqs]] +==== {api-prereq-title} + +* To use this API, you must have at least the `manage_own_api_key` cluster privilege. +Users can only update API keys that they created or that were granted to them. 
+To update another user's API key, use the <> +to submit a request on behalf of another user. + +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. +To update API keys, the owner user's credentials are required. + +[[security-api-bulk-update-api-keys-desc]] +==== {api-description-title} + +This API is similar to <> but allows you to apply the *same update* to multiple API keys in one API call. +This operation can greatly improve performance over making individual updates. + +It's not possible to update expired or <> API keys. + +This API supports updates to API key access scope and metadata. +The access scope of each API key is derived from the <> you specify in the request, and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +[IMPORTANT] +==== +If you don't specify <> in the request, a call to this API might still change an API key's access scope. +This change can occur if the owner user's permissions have changed since the API key was created or last modified. +==== + +[[security-api-bulk-update-api-keys-request-body]] +==== {api-request-body-title} + +You can specify the following parameters in the request body. + +`ids`:: (Required, list) The IDs of the API keys to update. + +[[security-api-bulk-update-api-keys-api-key-role-descriptors]] +`role_descriptors`:: +(Optional, object) The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. +The structure of a role descriptor is the same as the request for the <>. + +`metadata`:: +(Optional, object) Arbitrary, nested metadata to associate with the API keys. + +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. +Any information specified with this parameter fully replaces metadata previously associated with the API key. + +[[security-api-bulk-update-api-keys-response-body]] +==== {api-response-body-title} + +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and <> for any failed update. + +[[security-api-bulk-update-api-keys-example]] +==== {api-examples-title} + +For the examples below, assume that a user creates two API keys. +The user creates the first API key: + +[source,console] +------------------------------------------------------------ +POST /_security/api_key +{ + "name": "my-api-key", + "role_descriptors": { + "role-a": { + "cluster": ["all"], + "index": [ + { + "names": ["index-a*"], + "privileges": ["read"] + } + ] + } + }, + "metadata": { + "application": "my-application", + "environment": { + "level": 1, + "trusted": true, + "tags": ["dev", "staging"] + } + } +} +------------------------------------------------------------ + +This results in a response with the following API key information. 
+ +[source,console-result] +-------------------------------------------------- +{ + "id": "VuaCfGcBCdbkQm-e5aOx", + "name": "my-api-key", + "api_key": "ui2lp2axTNmsyakw9tvNnw", + "encoded": "VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==" +} +-------------------------------------------------- +// TESTRESPONSE[s/VuaCfGcBCdbkQm-e5aOx/$body.id/] +// TESTRESPONSE[s/ui2lp2axTNmsyakw9tvNnw/$body.api_key/] +// TESTRESPONSE[s/VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==/$body.encoded/] + +The user creates the second API key: + +[source,console] +------------------------------------------------------------ +POST /_security/api_key +{ + "name": "my-other-api-key", + "metadata": { + "application": "my-application", + "environment": { + "level": 2, + "trusted": true, + "tags": ["dev", "staging"] + } + } +} +------------------------------------------------------------ + +Resulting in the following API key information. + +[source,console-result] +-------------------------------------------------- +{ + "id": "H3_AhoIBA9hmeQJdg7ij", + "name": "my-other-api-key", + "api_key": "134G4ilmT_uGWXHRfJfXXA", + "encoded": "SDNfQWhvSUJBOWhtZVFKZGc3aWo6MTM0RzRpbG1UX3VHV1hIUmZKZlhYQQ==" +} +-------------------------------------------------- +// TESTRESPONSE[s/H3_AhoIBA9hmeQJdg7ij/$body.id/] +// TESTRESPONSE[s/134G4ilmT_uGWXHRfJfXXA/$body.api_key/] +// TESTRESPONSE[s/SDNfQWhvSUJBOWhtZVFKZGc3aWo6MTM0RzRpbG1UX3VHV1hIUmZKZlhYQQ==/$body.encoded/] + +Further, assume that the owner user's permissions are: + +[[security-api-bulk-update-api-keys-examples-user-permissions]] +[source,js] +-------------------------------------------------- +{ + "cluster": ["all"], + "index": [ + { + "names": ["*"], + "privileges": ["all"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The following example updates the API keys created above, assigning them new role descriptors and metadata. + +[source,console] +---- +POST /_security/api_key/_bulk_update +{ + "ids": [ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], + "role_descriptors": { + "role-a": { + "index": [ + { + "names": ["*"], + "privileges": ["write"] + } + ] + } + }, + "metadata": { + "environment": { + "level": 2, + "trusted": true, + "tags": ["production"] + } + } +} +---- +// TEST[skip:api key ids not available] + +A successful call returns a JSON structure indicating that the API keys were updated: + +[source,console-result] +---- +{ + "updated": [ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], + "noops": [] +} +---- + +Both API keys' effective permissions after the update will be the intersection of the supplied role descriptors and the <>: + +[source,js] +-------------------------------------------------- +{ + "index": [ + { + "names": ["*"], + "privileges": ["write"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The following example removes the API keys' previously assigned permissions, making them inherit the owner user's full permissions. 
+ +[source,console] +---- +POST /_security/api_key/_bulk_update +{ + "ids": [ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], + "role_descriptors": {} +} +---- +// TEST[skip:api key ids not available] + +Which returns the response: + +[source,console-result] +---- +{ + "updated": [ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], + "noops": [] +} +---- + +The API keys' effective permissions after the update will be the same as the owner user's: + +[source,js] +-------------------------------------------------- +{ + "cluster": ["all"], + "index": [ + { + "names": ["*"], + "privileges": ["all"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +For the next example, assume that the owner user's permissions have changed from <> to: + +[source,js] +-------------------------------------------------- +{ + "cluster": ["manage_security"], + "index": [ + { + "names": ["*"], + "privileges": ["read"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The following request auto-updates the snapshot of the user's permissions associated with the two API keys. + +[source,console] +---- +POST /_security/api_key/_bulk_update +{ + "ids": [ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ] +} +---- +// TEST[skip:api key ids not available] + +Which returns the response: + +[source,console-result] +---- +{ + "updated": [ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], + "noops": [] +} +---- + +Resulting in the following effective permissions for both API keys: + +[source,js] +-------------------------------------------------- +{ + "cluster": ["manage_security"], + "index": [ + { + "names": ["*"], + "privileges": ["read"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +If any API keys fail to update, error details are included in the `errors` field. +For example: + +[[security-api-bulk-update-api-keys-examples-errors]] +[source,js] +-------------------------------------------------- +{ + "updated": ["VuaCfGcBCdbkQm-e5aOx"], + "noops": [], + "errors": { <1> + "count": 3, + "details": { + "g_PqP4IBcBaEQdwM5-WI": { <2> + "type": "resource_not_found_exception", + "reason": "no API key owned by requesting user found for ID [g_PqP4IBcBaEQdwM5-WI]" + }, + "OM4cg4IBGgpHBfLerY4B": { + "type": "illegal_argument_exception", + "reason": "cannot update invalidated API key [OM4cg4IBGgpHBfLerY4B]" + }, + "Os4gg4IBGgpHBfLe2I7j": { + "type": "exception", + "reason": "bulk request execution failure", + "caused_by": { <3> + "type" : "version_conflict_engine_exception", + "reason" : "[1]: version conflict, required seqNo [1], primary term [1]. current document has seqNo [2] and primary term [1]" + } + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +<1> This field is not present in the response when `count` is 0. +<2> The ID of the API key for which the error occurred. +<3> The error details may also include a `caused_by` field. diff --git a/x-pack/docs/en/rest-api/security/update-api-key.asciidoc b/x-pack/docs/en/rest-api/security/update-api-key.asciidoc index c8104c295e479..8fc71c7b8257d 100644 --- a/x-pack/docs/en/rest-api/security/update-api-key.asciidoc +++ b/x-pack/docs/en/rest-api/security/update-api-key.asciidoc @@ -26,6 +26,8 @@ To update an API key, the owner user's credentials are required. ==== {api-description-title} Use this API to update API keys created by the <> or <> APIs. 
+If you need to apply the same update to many API keys, you can use <> to reduce overhead. + It's not possible to update expired API keys, or API keys that have been invalidated by <>. This API supports updates to an API key's access scope and metadata. @@ -53,9 +55,9 @@ You can specify the following parameters in the request body, which is optional. `role_descriptors`:: (Optional, object) The role descriptors to assign to this API key. The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. -If no privileges are assigned, the API key inherits the owner user's full permissions. -You can assign new privileges to the API key by specifying them in this parameter. -To remove assigned privileges, you can supply an empty `role_descriptors` parameter. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, you can supply an empty `role_descriptors` parameter, i.e., an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the <>. @@ -189,7 +191,7 @@ The API key's effective permissions after the update will be the intersection of -------------------------------------------------- // NOTCONSOLE -The following example removes the API key's previously assigned permissions. +The following example removes the API key's previously assigned permissions, making it inherit the owner user's full permissions. [source,console] ---- @@ -209,7 +211,7 @@ Which returns the response: } ---- -The API key's effective permissions after the update will the same as the <>: +The API key's effective permissions after the update will be the same as the owner user's: [source,js] -------------------------------------------------- From 27061a530e75d81f9b59071f97a24e96451d5202 Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Wed, 17 Aug 2022 10:50:15 +0200 Subject: [PATCH 228/265] Revert "[DOCS] Update search_after section with an example (#89328)" (#89411) Reverts elastic/elasticsearch#89328 --- .../paginate-search-results.asciidoc | 48 +------------------ 1 file changed, 1 insertion(+), 47 deletions(-) diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc index 4fd39efce8a05..931e6cffc6675 100644 --- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc +++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc @@ -46,53 +46,7 @@ You can use the `search_after` parameter to retrieve the next page of hits using a set of <> from the previous page. Using `search_after` requires multiple search requests with the same `query` and -`sort` values. The first step is to run an initial request. 
The following -example sorts the results by two fields (`date` and `tie_breaker_id`): -[source,js] --------------------------------------------------- -GET twitter/_search -{ - "query": { - "match" : { - "title" : "elasticsearch" - } - }, - "sort": [ - {"date": "asc"}, - {"tie_breaker_id": "asc"} <1> - ] -} --------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] -// TEST[s/"tie_breaker_id": "asc"/"tie_breaker_id": {"unmapped_type": "keyword"}/] - -<1> A copy of the `_id` field with `doc_values` enabled - -The search response includes an array of `sort` values for each hit. To retrieve -the next page of results, repeat the request, take the `sort` values from the -last hit, and insert those into the `search_after` array: -[source,js] --------------------------------------------------- -GET twitter/_search -{ - "query": { - "match" : - "title" : "elasticsearch" - } - }, - "search_after": [1463538857, "654323"], - "sort": [ - {"date": "asc"}, - {"tie_breaker_id": "asc"} - ] -} --------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] -// TEST[s/"tie_breaker_id": "asc"/"tie_breaker_id": {"unmapped_type": "keyword"}/] -Repeat this process by updating the `search_after` array every time you retrieve a -new page of results. If a <> occurs between these requests, +`sort` values. If a <> occurs between these requests, the order of your results may change, causing inconsistent results across pages. To prevent this, you can create a <> to preserve the current index state over your searches. From e2bf861511fa3d39dbfaccaefcd8ecce408c53e2 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 17 Aug 2022 10:57:35 +0200 Subject: [PATCH 229/265] Introduce TriangleTreeReader.DecodedVisitor (#89401) This commit introduces a DecodedVisitor that removes duplicate code for visitors that need to decode coordinates. --- .../index/fielddata/Component2DVisitor.java | 90 ++++++------------- .../index/fielddata/LabelPositionVisitor.java | 44 ++++----- .../index/fielddata/TriangleTreeReader.java | 88 ++++++++++++++++++ 3 files changed, 130 insertions(+), 92 deletions(-) diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Component2DVisitor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Component2DVisitor.java index 2a0fad0af3351..d6881969da82e 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Component2DVisitor.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/Component2DVisitor.java @@ -15,14 +15,13 @@ * A {@link TriangleTreeReader.Visitor} implementation for {@link Component2D} geometries. * It can solve spatial relationships against a serialize triangle tree. */ -public abstract class Component2DVisitor implements TriangleTreeReader.Visitor { +public abstract class Component2DVisitor extends TriangleTreeReader.DecodedVisitor { protected final Component2D component2D; - private final CoordinateEncoder encoder; private Component2DVisitor(Component2D component2D, CoordinateEncoder encoder) { + super(encoder); this.component2D = component2D; - this.encoder = encoder; } /** If the relationship has been honour. 
*/ @@ -32,59 +31,24 @@ private Component2DVisitor(Component2D component2D, CoordinateEncoder encoder) { public abstract void reset(); @Override - public void visitPoint(int x, int y) { - doVisitPoint(encoder.decodeX(x), encoder.decodeY(y)); - } - - abstract void doVisitPoint(double x, double y); - - @Override - public void visitLine(int aX, int aY, int bX, int bY, byte metadata) { - doVisitLine(encoder.decodeX(aX), encoder.decodeY(aY), encoder.decodeX(bX), encoder.decodeY(bY), metadata); - } - - abstract void doVisitLine(double aX, double aY, double bX, double bY, byte metadata); - - @Override - public void visitTriangle(int aX, int aY, int bX, int bY, int cX, int cY, byte metadata) { - doVisitTriangle( - encoder.decodeX(aX), - encoder.decodeY(aY), - encoder.decodeX(bX), - encoder.decodeY(bY), - encoder.decodeX(cX), - encoder.decodeY(cY), - metadata - ); - } - - abstract void doVisitTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata); - - @Override - public boolean pushX(int minX) { - return component2D.getMaxX() >= encoder.decodeX(minX); + public boolean pushDecodedX(double minX) { + return component2D.getMaxX() >= minX; } @Override - public boolean pushY(int minY) { - return component2D.getMaxY() >= encoder.decodeY(minY); + public boolean pushDecodedY(double minY) { + return component2D.getMaxY() >= minY; } @Override - public boolean push(int maxX, int maxY) { - return component2D.getMinX() <= encoder.decodeX(maxX) && component2D.getMinY() <= encoder.decodeY(maxY); + public boolean pushDecoded(double maxX, double maxY) { + return component2D.getMinX() <= maxX && component2D.getMinY() <= maxY; } @Override - public boolean push(int minX, int minY, int maxX, int maxY) { - final PointValues.Relation relation = component2D.relate( - encoder.decodeX(minX), - encoder.decodeX(maxX), - encoder.decodeY(minY), - encoder.decodeY(maxY) - ); - return doPush(relation); + public boolean pushDecoded(double minX, double minY, double maxX, double maxY) { + return pushDecoded(component2D.relate(minX, maxX, minY, maxY)); } /** Relation between the query shape and the doc value bounding box. Depending on the query relationship, @@ -92,7 +56,7 @@ public boolean push(int minX, int minY, int maxX, int maxY) { * * @return if true, the visitor keeps traversing the tree, else it stops. * */ - abstract boolean doPush(PointValues.Relation relation); + abstract boolean pushDecoded(PointValues.Relation relation); /** * Creates a visitor from the provided Component2D and spatial relationship. 
Visitors are re-usable by @@ -131,17 +95,17 @@ public void reset() { } @Override - void doVisitPoint(double x, double y) { + void visitDecodedPoint(double x, double y) { intersects = component2D.contains(x, y); } @Override - void doVisitLine(double aX, double aY, double bX, double bY, byte metadata) { + void visitDecodedLine(double aX, double aY, double bX, double bY, byte metadata) { intersects = component2D.intersectsLine(aX, aY, bX, bY); } @Override - void doVisitTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { + void visitDecodedTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { intersects = component2D.intersectsTriangle(aX, aY, bX, bY, cX, cY); } @@ -152,7 +116,7 @@ public boolean push() { } @Override - boolean doPush(PointValues.Relation relation) { + boolean pushDecoded(PointValues.Relation relation) { if (relation == PointValues.Relation.CELL_OUTSIDE_QUERY) { // shapes are disjoint, stop traversing the tree. return false; @@ -192,17 +156,17 @@ public void reset() { } @Override - void doVisitPoint(double x, double y) { + void visitDecodedPoint(double x, double y) { disjoint = component2D.contains(x, y) == false; } @Override - void doVisitLine(double aX, double aY, double bX, double bY, byte metadata) { + void visitDecodedLine(double aX, double aY, double bX, double bY, byte metadata) { disjoint = component2D.intersectsLine(aX, aY, bX, bY) == false; } @Override - void doVisitTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { + void visitDecodedTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { disjoint = component2D.intersectsTriangle(aX, aY, bX, bY, cX, cY) == false; } @@ -213,7 +177,7 @@ public boolean push() { } @Override - boolean doPush(PointValues.Relation relation) { + boolean pushDecoded(PointValues.Relation relation) { if (relation == PointValues.Relation.CELL_OUTSIDE_QUERY) { // shapes are disjoint, stop traversing the tree. return false; @@ -253,17 +217,17 @@ public void reset() { } @Override - void doVisitPoint(double x, double y) { + void visitDecodedPoint(double x, double y) { within = component2D.contains(x, y); } @Override - void doVisitLine(double aX, double aY, double bX, double bY, byte metadata) { + void visitDecodedLine(double aX, double aY, double bX, double bY, byte metadata) { within = component2D.containsLine(aX, aY, bX, bY); } @Override - void doVisitTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { + void visitDecodedTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { within = component2D.containsTriangle(aX, aY, bX, bY, cX, cY); } @@ -298,7 +262,7 @@ public boolean push(int maxX, int maxY) { } @Override - boolean doPush(PointValues.Relation relation) { + boolean pushDecoded(PointValues.Relation relation) { if (relation == PointValues.Relation.CELL_OUTSIDE_QUERY) { // shapes are disjoint, stop traversing the tree. 
within = false; @@ -333,7 +297,7 @@ public void reset() { } @Override - void doVisitPoint(double x, double y) { + void visitDecodedPoint(double x, double y) { final Component2D.WithinRelation rel = component2D.withinPoint(x, y); if (rel != Component2D.WithinRelation.DISJOINT) { // Only override relationship if different to DISJOINT @@ -342,7 +306,7 @@ void doVisitPoint(double x, double y) { } @Override - void doVisitLine(double aX, double aY, double bX, double bY, byte metadata) { + void visitDecodedLine(double aX, double aY, double bX, double bY, byte metadata) { final boolean ab = (metadata & 1 << 4) == 1 << 4; final Component2D.WithinRelation rel = component2D.withinLine(aX, aY, ab, bX, bY); if (rel != Component2D.WithinRelation.DISJOINT) { @@ -352,7 +316,7 @@ void doVisitLine(double aX, double aY, double bX, double bY, byte metadata) { } @Override - void doVisitTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { + void visitDecodedTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { final boolean ab = (metadata & 1 << 4) == 1 << 4; final boolean bc = (metadata & 1 << 5) == 1 << 5; final boolean ca = (metadata & 1 << 6) == 1 << 6; @@ -370,7 +334,7 @@ public boolean push() { } @Override - boolean doPush(PointValues.Relation relation) { + boolean pushDecoded(PointValues.Relation relation) { // Only traverse the tree if the shapes intersects. return relation == PointValues.Relation.CELL_CROSSES_QUERY; } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LabelPositionVisitor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LabelPositionVisitor.java index 812a5761fd518..e7a6aed4d6c9d 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LabelPositionVisitor.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LabelPositionVisitor.java @@ -15,78 +15,64 @@ * * TODO: We could instead choose the point closer to the centroid which improves unbalanced trees */ -public class LabelPositionVisitor implements TriangleTreeReader.Visitor { +public class LabelPositionVisitor extends TriangleTreeReader.DecodedVisitor { private T labelPosition; - private final CoordinateEncoder encoder; private final BiFunction pointMaker; public LabelPositionVisitor(CoordinateEncoder encoder, BiFunction pointMaker) { - this.encoder = encoder; + super(encoder); this.pointMaker = pointMaker; } @Override - public void visitPoint(int xi, int yi) { - double x = encoder.decodeX(xi); - double y = encoder.decodeY(yi); - // System.out.println("Got point: (" + x + "," + y + ")"); + void visitDecodedPoint(double x, double y) { assert labelPosition == null; labelPosition = pointMaker.apply(x, y); } @Override - public void visitLine(int aXi, int aYi, int bXi, int bYi, byte metadata) { - double aX = encoder.decodeX(aXi); - double aY = encoder.decodeY(aYi); - double bX = encoder.decodeX(bXi); - double bY = encoder.decodeY(bYi); - // System.out.println("Got line: (" + aX + "," + aY + ")-(" + bX + "," + bY + ")"); + public void visitDecodedLine(double aX, double aY, double bX, double bY, byte metadata) { assert labelPosition == null; labelPosition = pointMaker.apply((aX + bX) / 2.0, (aY + bY) / 2.0); } @Override - public void visitTriangle(int aXi, int aYi, int bXi, int bYi, int cXi, int cYi, byte metadata) { - double aX = encoder.decodeX(aXi); - double aY = encoder.decodeY(aYi); - double bX 
= encoder.decodeX(bXi); - double bY = encoder.decodeY(bYi); - double cX = encoder.decodeX(cXi); - double cY = encoder.decodeY(cYi); - // System.out.println("Got triangle: (" + aX + "," + aY + ")-(" + bX + "," + bY + ")-(" + cX + "," + cY + ")"); + public void visitDecodedTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { assert labelPosition == null; labelPosition = pointMaker.apply((aX + bX + cX) / 3.0, (aY + bY + cY) / 3.0); } @Override - public boolean push() { - // Don't traverse deeper once we found a result + boolean pushDecodedX(double minX) { return labelPosition == null; } @Override - public boolean pushX(int minX) { - // Don't traverse deeper once we found a result + boolean pushDecodedY(double minX) { return labelPosition == null; } @Override - public boolean pushY(int minY) { + public boolean push() { // Don't traverse deeper once we found a result return labelPosition == null; } @Override - public boolean push(int maxX, int maxY) { - // Don't traverse deeper once we found a result + boolean pushDecoded(double maxX, double maxY) { return labelPosition == null; } @Override public boolean push(int minX, int minY, int maxX, int maxY) { // Always start the traversal - return true; + return labelPosition == null; + } + + @Override + boolean pushDecoded(double minX, double minY, double maxX, double maxY) { + return labelPosition == null; } public T labelPosition() { diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeReader.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeReader.java index 46e7baef0f08d..0bb02f40b5057 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeReader.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/TriangleTreeReader.java @@ -165,4 +165,92 @@ interface Visitor { * visiting the tree. */ boolean push(int minX, int minY, int maxX, int maxY); } + + /** Visitor for triangle interval tree which decodes the coordinates */ + public abstract static class DecodedVisitor implements Visitor { + + private final CoordinateEncoder encoder; + + DecodedVisitor(CoordinateEncoder encoder) { + this.encoder = encoder; + } + + @Override + public void visitPoint(int x, int y) { + visitDecodedPoint(encoder.decodeX(x), encoder.decodeY(y)); + } + + /** + * Equivalent to {@link #visitPoint(int, int)} but coordinates are decoded. + */ + abstract void visitDecodedPoint(double x, double y); + + @Override + public void visitLine(int aX, int aY, int bX, int bY, byte metadata) { + visitDecodedLine(encoder.decodeX(aX), encoder.decodeY(aY), encoder.decodeX(bX), encoder.decodeY(bY), metadata); + } + + /** + * Equivalent to {@link #visitLine(int, int, int, int, byte)} but coordinates are decoded. + */ + abstract void visitDecodedLine(double aX, double aY, double bX, double bY, byte metadata); + + @Override + public void visitTriangle(int aX, int aY, int bX, int bY, int cX, int cY, byte metadata) { + visitDecodedTriangle( + encoder.decodeX(aX), + encoder.decodeY(aY), + encoder.decodeX(bX), + encoder.decodeY(bY), + encoder.decodeX(cX), + encoder.decodeY(cY), + metadata + ); + } + + /** + * Equivalent to {@link #visitTriangle(int, int, int, int, int, int, byte)} but coordinates are decoded. 
+ */ + abstract void visitDecodedTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata); + + @Override + public boolean pushX(int minX) { + return pushDecodedX(encoder.decodeX(minX)); + } + + /** + * Equivalent to {@link #pushX(int)} but coordinates are decoded. + */ + abstract boolean pushDecodedX(double minX); + + @Override + public boolean pushY(int minY) { + return pushDecodedY(encoder.decodeY(minY)); + } + + /** + * Equivalent to {@link #pushY(int)} but coordinates are decoded. + */ + abstract boolean pushDecodedY(double minX); + + @Override + public boolean push(int maxX, int maxY) { + return pushDecoded(encoder.decodeX(maxX), encoder.decodeY(maxY)); + } + + /** + * Equivalent to {@link #push(int, int)} but coordinates are decoded. + */ + abstract boolean pushDecoded(double maxX, double maxY); + + @Override + public boolean push(int minX, int minY, int maxX, int maxY) { + return pushDecoded(encoder.decodeX(minX), encoder.decodeY(minY), encoder.decodeX(maxX), encoder.decodeY(maxY)); + } + + /** + * Equivalent to {@link #push(int, int, int, int)} but coordinates are decoded. + */ + abstract boolean pushDecoded(double minX, double minY, double maxX, double maxY); + } } From 189f279b4f80b94ce268df9cf58bc40006928330 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Wed, 17 Aug 2022 10:00:16 +0100 Subject: [PATCH 230/265] Don't modify source map when parsing composite runtime field (#89114) When calling RuntimeField.parseRuntimeFields() for fields defined in the search request, we need to wrap the Map containing field definitions in another Map that supports value removal, so that we don't inadvertently remove the definitions from the root request. CompositeRuntimeField was not doing this extra wrapping, which meant that requests that went to multiple shards and that therefore parsed the definitions multiple times would throw an error complaining that the fields parameter was missing, because the root request had been modified. 
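The fix below simply copies the field definitions before handing them to the parser. As a rough standalone illustration of why the copy matters (a hypothetical demo class, not the Elasticsearch code), a parser that consumes definitions by removing them from the map it is given will leave a shared map empty for the next shard-level parse:

    import java.util.HashMap;
    import java.util.Map;

    public class DefensiveCopyDemo {
        // Stands in for a parser that consumes definitions by removing them
        // from the map it is handed.
        static void parse(Map<String, Object> definitions) {
            if (definitions.remove("fields") == null) {
                throw new IllegalArgumentException("fields parameter is missing");
            }
        }

        public static void main(String[] args) {
            Map<String, Object> rootRequest = new HashMap<>();
            rootRequest.put("fields", Map.of("shouldReturn", Map.of("type", "boolean")));

            // Wrapping in a fresh HashMap keeps the root request intact, so a
            // second shard-level parse still finds the definitions.
            parse(new HashMap<>(rootRequest));
            parse(new HashMap<>(rootRequest));

            // Passing the root map directly empties it on the first parse ...
            parse(rootRequest);
            // ... so the next parse fails, which is the error seen on multi-shard requests.
            try {
                parse(rootRequest);
            } catch (IllegalArgumentException e) {
                System.out.println("second parse failed: " + e.getMessage());
            }
        }
    }
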
--- docs/changelog/89114.yaml | 5 +++ .../111_search_time_composite.yml | 36 +++++++++++++++++++ .../index/mapper/CompositeRuntimeField.java | 13 +++---- 3 files changed, 48 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/89114.yaml create mode 100644 modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/111_search_time_composite.yml diff --git a/docs/changelog/89114.yaml b/docs/changelog/89114.yaml new file mode 100644 index 0000000000000..c363437586249 --- /dev/null +++ b/docs/changelog/89114.yaml @@ -0,0 +1,5 @@ +pr: 89114 +summary: Don't modify source map when parsing composite runtime field +area: Mapping +type: bug +issues: [] diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/111_search_time_composite.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/111_search_time_composite.yml new file mode 100644 index 0000000000000..dc10d16c26ca3 --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/111_search_time_composite.yml @@ -0,0 +1,36 @@ +--- +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + number_of_replicas: 0 + - do: + bulk: + index: test + refresh: true + body: | + {"index":{}} + {"A":2} + +--- +"search-time composite across multiple shards": + - do: + search: + index: test + body: + query: + term: + "r.shouldReturn" : true + runtime_mappings: + r: + type: composite + fields: + shouldReturn: + type: boolean + script: + source: "emit('shouldReturn',true)" + + - match: {hits.total.value: 1} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java b/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java index a8e9b487160de..ea8fbb0dcd60b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java @@ -17,6 +17,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -85,7 +86,12 @@ protected RuntimeField createRuntimeField(MappingParserContext parserContext) { name, lookup -> factory.newFactory(name, script.get().getParams(), lookup) ); - Map runtimeFields = RuntimeField.parseRuntimeFields(fields.getValue(), parserContext, builder, false); + Map runtimeFields = RuntimeField.parseRuntimeFields( + new HashMap<>(fields.getValue()), + parserContext, + builder, + false + ); return new CompositeRuntimeField(name, getParameters(), runtimeFields.values()); } }); @@ -118,11 +124,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (FieldMapper.Parameter parameter : parameters) { parameter.toXContent(builder, includeDefaults); } - builder.startObject("fields"); - for (RuntimeField subfield : subfields) { - subfield.toXContent(builder, params); - } - builder.endObject(); builder.endObject(); return builder; } From 3c30674a3b5d9bc2947da01350eaa2d78d56645e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 17 Aug 2022 11:47:24 +0200 Subject: [PATCH 231/265] Fix ConcurrentSnapshotsIT.testAssertMultipleSnapshotsAndPrimaryFailOver (#89413) We have to wait for the snapshot to actually have started before we restart the data node. This is not guaranteed since we start the snapshot via an async client call. 
Otherwise the expectation of partial snapshot failure will not hold because we will only partially fail if the data node has actually started work on the snapshot when it's restarted. closes #89039 --- .../java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 3d83b0d576288..22a6af9ffaa41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -521,6 +521,7 @@ public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception { final String secondSnapshot = "snapshot-two"; final ActionFuture secondSnapshotResponse = startFullSnapshotFromMasterClient(repoName, secondSnapshot); + awaitNumberOfSnapshotsInProgress(2); internalCluster().restartNode(dataNode); From 5d6af5890f70cfdb234240f7a1d8c65c6339315d Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 17 Aug 2022 12:53:17 +0200 Subject: [PATCH 232/265] GeoShapeValue can determine the spatial relationship with a LatLonGeometry (#89415) This PR adds a new method to GeoShape value called GeoRelation relate(LatLonGeometry latLonGeometry) that replaces boolean intersects(Geometry geometry). --- .../search/GeoShapeScriptDocValuesIT.java | 10 +- .../index/fielddata/GeoShapeValues.java | 39 +++--- .../LatLonGeometryRelationVisitor.java | 116 ++++++++++++++++++ .../fielddata/GeometryDocValueTests.java | 10 +- .../LatLonGeometryRelationVisitorTests.java | 78 ++++++++++++ 5 files changed, 226 insertions(+), 27 deletions(-) create mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitor.java create mode 100644 x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java index da831d85f554e..9ae7726e32a44 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java @@ -6,12 +6,12 @@ */ package org.elasticsearch.xpack.spatial.search; +import org.apache.lucene.geo.Circle; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.geo.GeometryTestUtils; -import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.LinearRing; @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import org.elasticsearch.xpack.spatial.index.fielddata.DimensionalShapeType; +import org.elasticsearch.xpack.spatial.index.fielddata.GeoRelation; import org.elasticsearch.xpack.spatial.index.fielddata.GeoShapeValues; import 
org.elasticsearch.xpack.spatial.util.GeoTestUtils; import org.hamcrest.Matchers; @@ -273,8 +274,11 @@ private void doTestGeometry(Geometry geometry, GeoShapeValues.GeoShapeValue expe // Check label position is in the geometry, but with a tolerance constructed as a circle of 1m radius to handle quantization Point labelPosition = new Point(fields.get("label_lon").getValue(), fields.get("label_lat").getValue()); - Circle tolerance = new Circle(labelPosition.getX(), labelPosition.getY(), 1); - assertTrue("Expect label position " + labelPosition + " to intersect geometry " + geometry, value.intersects(tolerance)); + Circle tolerance = new Circle(labelPosition.getY(), labelPosition.getX(), 1); + assertTrue( + "Expect label position " + labelPosition + " to intersect geometry " + geometry, + value.relate(tolerance) != GeoRelation.QUERY_DISJOINT + ); // Check that the label position is the expected one, or the centroid in certain polygon cases if (expectedLabelPosition != null) { diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java index 631ca71866d1d..77a3c0a3335b8 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java @@ -9,16 +9,14 @@ import org.apache.lucene.document.ShapeField; import org.apache.lucene.geo.LatLonGeometry; +import org.apache.lucene.geo.Point; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.utils.GeographyValidator; import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.index.mapper.GeoShapeIndexer; -import org.elasticsearch.index.mapper.GeoShapeQueryable; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -93,11 +91,13 @@ public static class GeoShapeValue implements ToXContentFragment { private final GeometryDocValueReader reader; private final BoundingBox boundingBox; private final Tile2DVisitor tile2DVisitor; + private final LatLonGeometryRelationVisitor component2DRelationVisitor; public GeoShapeValue() { this.reader = new GeometryDocValueReader(); this.boundingBox = new BoundingBox(); this.tile2DVisitor = new Tile2DVisitor(); + this.component2DRelationVisitor = new LatLonGeometryRelationVisitor(CoordinateEncoder.GEO); } /** @@ -117,8 +117,16 @@ public BoundingBox boundingBox() { */ public GeoPoint labelPosition() throws IOException { // For polygons we prefer to use the centroid, as long as it is within the polygon - if (reader.getDimensionalShapeType() == DimensionalShapeType.POLYGON && intersects(new Point(lon(), lat()))) { - return new GeoPoint(lat(), lon()); + if (reader.getDimensionalShapeType() == DimensionalShapeType.POLYGON) { + Component2DVisitor visitor = Component2DVisitor.getVisitor( + LatLonGeometry.create(new Point(lat(), lon())), + ShapeField.QueryRelation.INTERSECTS, + CoordinateEncoder.GEO + ); + reader.visit(visitor); + if (visitor.matches()) { + return new GeoPoint(lat(), lon()); + } } // For all other cases, use 
the first triangle (or line or point) in the tree which will always intersect the shape LabelPositionVisitor visitor = new LabelPositionVisitor<>(CoordinateEncoder.GEO, (x, y) -> new GeoPoint(y, x)); @@ -137,21 +145,14 @@ public GeoRelation relate(int minX, int maxX, int minY, int maxY) throws IOExcep } /** - * Determine if the current shape value intersects the specified geometry. - * Note that the intersection must be true in quantized space, so it is possible that - * points on the edges of geometries will return false due to quantization shifting them off the geometry. - * To deal with this, one option is to pass in a circle around the point with a 1m radius - * which is enough to cover the resolution of the quantization. + * Determine the {@link GeoRelation} between the current shape and a {@link LatLonGeometry}. It only supports + * simple geometries, therefore it will fail if the LatLonGeometry is a {@link org.apache.lucene.geo.Rectangle} + * that crosses the dateline. */ - public boolean intersects(Geometry geometry) throws IOException { - LatLonGeometry[] latLonGeometries = GeoShapeQueryable.toQuantizeLuceneGeometry(geometry, ShapeRelation.INTERSECTS); - Component2DVisitor visitor = Component2DVisitor.getVisitor( - LatLonGeometry.create(latLonGeometries), - ShapeField.QueryRelation.INTERSECTS, - CoordinateEncoder.GEO - ); - reader.visit(visitor); - return visitor.matches(); + public GeoRelation relate(LatLonGeometry latLonGeometry) throws IOException { + component2DRelationVisitor.reset(latLonGeometry); + reader.visit(component2DRelationVisitor); + return component2DRelationVisitor.relation(); } public DimensionalShapeType dimensionalShapeType() { diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitor.java new file mode 100644 index 0000000000000..1a0eef457abb4 --- /dev/null +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitor.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.index.fielddata; + +import org.apache.lucene.geo.Component2D; +import org.apache.lucene.geo.LatLonGeometry; +import org.apache.lucene.index.PointValues; + +/** + * A reusable tree reader visitor for a previous serialized {@link org.elasticsearch.geometry.Geometry} using + * {@link TriangleTreeWriter}. + * + * This class supports checking {@link LatLonGeometry} relations against a serialized triangle tree. + * It does not support bounding boxes crossing the dateline. + * + */ +class LatLonGeometryRelationVisitor extends TriangleTreeReader.DecodedVisitor { + + private GeoRelation relation; + private Component2D component2D; + + LatLonGeometryRelationVisitor(CoordinateEncoder encoder) { + super(encoder); + } + + public void reset(LatLonGeometry latLonGeometry) { + component2D = LatLonGeometry.create(latLonGeometry); + relation = GeoRelation.QUERY_DISJOINT; + } + + /** + * return the computed relation. 
+ */ + public GeoRelation relation() { + return relation; + } + + @Override + void visitDecodedPoint(double x, double y) { + if (component2D.contains(x, y)) { + if (component2D.withinPoint(x, y) == Component2D.WithinRelation.CANDIDATE) { + relation = GeoRelation.QUERY_INSIDE; + } else { + relation = GeoRelation.QUERY_CROSSES; + } + } + } + + @Override + void visitDecodedLine(double aX, double aY, double bX, double bY, byte metadata) { + if (component2D.intersectsLine(aX, aY, bX, bY)) { + final boolean ab = (metadata & 1 << 4) == 1 << 4; + if (component2D.withinLine(aX, aY, ab, bX, bY) == Component2D.WithinRelation.CANDIDATE) { + relation = GeoRelation.QUERY_INSIDE; + } else { + relation = GeoRelation.QUERY_CROSSES; + } + } + } + + @Override + void visitDecodedTriangle(double aX, double aY, double bX, double bY, double cX, double cY, byte metadata) { + if (component2D.intersectsTriangle(aX, aY, bX, bY, cX, cY)) { + boolean ab = (metadata & 1 << 4) == 1 << 4; + boolean bc = (metadata & 1 << 5) == 1 << 5; + boolean ca = (metadata & 1 << 6) == 1 << 6; + if (component2D.withinTriangle(aX, aY, ab, bX, bY, bc, cX, cY, ca) == Component2D.WithinRelation.CANDIDATE) { + relation = GeoRelation.QUERY_INSIDE; + } else { + relation = GeoRelation.QUERY_CROSSES; + } + } + } + + @Override + public boolean push() { + return relation != GeoRelation.QUERY_CROSSES; + } + + @Override + public boolean pushDecodedX(double minX) { + return component2D.getMaxX() >= minX; + } + + @Override + public boolean pushDecodedY(double minY) { + return component2D.getMaxY() >= minY; + } + + @Override + public boolean pushDecoded(double maxX, double maxY) { + return component2D.getMinY() <= maxY && component2D.getMinX() <= maxX; + } + + @Override + @SuppressWarnings("HiddenField") + public boolean pushDecoded(double minX, double minY, double maxX, double maxY) { + PointValues.Relation rel = component2D.relate(minX, maxX, minY, maxY); + if (rel == PointValues.Relation.CELL_OUTSIDE_QUERY) { + // shapes are disjoint + relation = GeoRelation.QUERY_DISJOINT; + return false; + } + if (rel == PointValues.Relation.CELL_INSIDE_QUERY) { + // the rectangle fully contains the shape + relation = GeoRelation.QUERY_CROSSES; + return false; + } + return true; + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java index 1026f2eaa246d..7e21541518836 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueTests.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack.spatial.index.fielddata; +import org.apache.lucene.geo.Circle; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.LinearRing; @@ -182,8 +182,8 @@ public void testAntarcticaLabelPosition() throws Exception { double centroidY = CoordinateEncoder.GEO.decodeY(reader.getCentroidY()); assertEquals(centroidX, labelPosition.lon(), 0.0000001); assertEquals(centroidY, labelPosition.lat(), 0.0000001); - Circle tolerance = new Circle(centroidX, centroidY, 1); - 
assertTrue("Expect label position to be within the geometry", shapeValue.intersects(tolerance)); + Circle tolerance = new Circle(centroidY, centroidX, 1); + assertTrue("Expect label position to be within the geometry", shapeValue.relate(tolerance) != GeoRelation.QUERY_DISJOINT); } public void testFranceLabelPosition() throws Exception { @@ -197,8 +197,8 @@ public void testFranceLabelPosition() throws Exception { double centroidY = CoordinateEncoder.GEO.decodeY(reader.getCentroidY()); assertEquals(centroidX, labelPosition.lon(), 0.0000001); assertEquals(centroidY, labelPosition.lat(), 0.0000001); - Circle tolerance = new Circle(centroidX, centroidY, 1); - assertTrue("Expect label position to be within the geometry", shapeValue.intersects(tolerance)); + Circle tolerance = new Circle(centroidY, centroidX, 1); + assertTrue("Expect label position to be within the geometry", shapeValue.relate(tolerance) != GeoRelation.QUERY_DISJOINT); } private Geometry loadResourceAsGeometry(String filename) throws IOException, ParseException { diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java new file mode 100644 index 0000000000000..3a0d0bcd03562 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.index.fielddata; + +import org.apache.lucene.document.ShapeField; +import org.apache.lucene.geo.Component2D; +import org.apache.lucene.geo.LatLonGeometry; +import org.apache.lucene.tests.geo.GeoTestUtil; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.spatial.util.GeoTestUtils; + +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class LatLonGeometryRelationVisitorTests extends ESTestCase { + + public void testPoint() throws Exception { + doTestShapes(GeoTestUtil::nextPoint); + } + + public void testLine() throws Exception { + doTestShapes(GeoTestUtil::nextLine); + } + + public void testPolygon() throws Exception { + doTestShapes(GeoTestUtil::nextPolygon); + } + + private void doTestShapes(Supplier supplier) throws Exception { + Geometry geometry = GeometryTestUtils.randomGeometryWithoutCircle(0, false); + GeoShapeValues.GeoShapeValue geoShapeValue = GeoTestUtils.geoShapeValue(geometry); + GeometryDocValueReader reader = GeoTestUtils.geometryDocValueReader(geometry, CoordinateEncoder.GEO); + for (int i = 0; i < 1000; i++) { + LatLonGeometry latLonGeometry = supplier.get(); + GeoRelation relation = geoShapeValue.relate(latLonGeometry); + Component2D component2D = LatLonGeometry.create(latLonGeometry); + Component2DVisitor contains = Component2DVisitor.getVisitor( + component2D, + ShapeField.QueryRelation.CONTAINS, + CoordinateEncoder.GEO + ); + reader.visit(contains); + Component2DVisitor intersects = Component2DVisitor.getVisitor( + component2D, + ShapeField.QueryRelation.INTERSECTS, + CoordinateEncoder.GEO + ); + reader.visit(intersects); + 
Component2DVisitor disjoint = Component2DVisitor.getVisitor( + component2D, + ShapeField.QueryRelation.DISJOINT, + CoordinateEncoder.GEO + ); + reader.visit(disjoint); + if (relation == GeoRelation.QUERY_INSIDE) { + assertThat(contains.matches(), equalTo(true)); + assertThat(intersects.matches(), equalTo(true)); + assertThat(disjoint.matches(), equalTo(false)); + } else if (relation == GeoRelation.QUERY_CROSSES) { + assertThat(contains.matches(), equalTo(false)); + assertThat(intersects.matches(), equalTo(true)); + assertThat(disjoint.matches(), equalTo(false)); + } else { + assertThat(contains.matches(), equalTo(false)); + assertThat(intersects.matches(), equalTo(false)); + assertThat(disjoint.matches(), equalTo(true)); + } + } + } +} From f1071cab36ec7105d63cfefe7c9ad19f7628cac3 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 17 Aug 2022 14:27:36 +0200 Subject: [PATCH 233/265] Remove side-effects in streams in PrimaryShardAllocator (#89218) `map` should be a side-effect free function, because it's a non-terminal operation. If we want to have side effect, we should use `forEach` which is terminal. --- .../gateway/PrimaryShardAllocator.java | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 15716076f9235..22b60768890b4 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -242,19 +242,18 @@ private static List buildNodeDecisions( Collection ineligibleShards; if (nodesToAllocate != null) { final Set discoNodes = new HashSet<>(); - nodeResults.addAll( - Stream.of(nodesToAllocate.yesNodeShards, nodesToAllocate.throttleNodeShards, nodesToAllocate.noNodeShards) - .flatMap(Collection::stream) - .map(dnode -> { - discoNodes.add(dnode.nodeShardState.getNode()); - return new NodeAllocationResult( + Stream.of(nodesToAllocate.yesNodeShards, nodesToAllocate.throttleNodeShards, nodesToAllocate.noNodeShards) + .flatMap(Collection::stream) + .forEach(dnode -> { + discoNodes.add(dnode.nodeShardState.getNode()); + nodeResults.add( + new NodeAllocationResult( dnode.nodeShardState.getNode(), shardStoreInfo(dnode.nodeShardState, inSyncAllocationIds), dnode.decision - ); - }) - .toList() - ); + ) + ); + }); ineligibleShards = fetchedShardData.getData() .values() .stream() From 09d00259f4de5cb34b3750bcc22ae5a2b2d38adc Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 17 Aug 2022 08:28:01 -0400 Subject: [PATCH 234/265] Graph: fix race condition in timeout (#88946) Previously `graph` checked if the request timed out, then spent some time doing work, then passed the timeout on to the next request. Over and over again. It's quite possible that the response may not have timed out for the first check but would have timed out for the second check. This manifests as the timeout being sent to the next hop being a negative number of milliseconds. We don't allow this sort of thing. This fixes this by moving the timeout check to the same spot it is read for setting the timeout on the next request - we just check if its `> 0` to find the timeouts. This does keep the request running slightly longer after it's officially timed out - but it's just long enough to prepare the next layer of request. Usually microseconds. Which should be fine. 
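A minimal standalone sketch of the corrected pattern (hypothetical demo class, not the actual Elasticsearch code): the remaining budget is read once, and that single value drives both the timed-out decision and the timeout handed to the next hop, so a negative timeout can never reach the next request.

    public class HopTimeoutDemo {
        public static void main(String[] args) throws InterruptedException {
            long timeoutMillis = 400;
            long startMillis = System.currentTimeMillis();

            Thread.sleep(50); // pretend we did the work for the current hop

            // Single clock read: the same value is used for the check and for
            // the next hop's timeout, so it cannot go negative between the two.
            long remainingMillis = startMillis + timeoutMillis - System.currentTimeMillis();
            if (remainingMillis <= 0) {
                System.out.println("timed out: respond with partial results");
                return;
            }
            System.out.println("next hop gets a timeout of " + remainingMillis + "ms");
        }
    }

In the real change this corresponds to computing timeRemainingMillis immediately before source.timeout(...) in TransportGraphExploreAction and returning a timed-out response when it is not positive.
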
Closes #55396 --- docs/changelog/88946.yaml | 6 +++ .../xpack/graph/test/GraphTests.java | 5 +-- .../action/TransportGraphExploreAction.java | 40 +++++++------------ 3 files changed, 23 insertions(+), 28 deletions(-) create mode 100644 docs/changelog/88946.yaml diff --git a/docs/changelog/88946.yaml b/docs/changelog/88946.yaml new file mode 100644 index 0000000000000..ae853f2e5ffa9 --- /dev/null +++ b/docs/changelog/88946.yaml @@ -0,0 +1,6 @@ +pr: 88946 +summary: "Graph: fix race condition in timeout" +area: Graph +type: bug +issues: + - 55396 diff --git a/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java b/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java index 7623fffa777f5..e9178675bd1a6 100644 --- a/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java +++ b/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java @@ -222,19 +222,18 @@ public void testPopularityQueryCrawl() { assertNull("Elvis is a 3rd tier connection so should not be returned here", response.getVertex(Vertex.createId("people", "elvis"))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/55396") public void testTimedoutQueryCrawl() { GraphExploreRequestBuilder grb = new GraphExploreRequestBuilder(client(), GraphExploreAction.INSTANCE).setIndices("test"); grb.setTimeout(TimeValue.timeValueMillis(400)); Hop hop1 = grb.createNextHop(QueryBuilders.termQuery("description", "beatles")); hop1.addVertexRequest("people").size(10).minDocCount(1); // members of beatles - // 00s friends of beatles - grb.createNextHop(QueryBuilders.termQuery("decade", "00s")).addVertexRequest("people").size(100).minDocCount(1); // A query that should cause a timeout ScriptQueryBuilder timeoutQuery = QueryBuilders.scriptQuery( new Script(ScriptType.INLINE, "mockscript", "graph_timeout", Collections.emptyMap()) ); grb.createNextHop(timeoutQuery).addVertexRequest("people").size(100).minDocCount(1); + // 00s friends of beatles + grb.createNextHop(QueryBuilders.termQuery("decade", "00s")).addVertexRequest("people").size(100).minDocCount(1); GraphExploreResponse response = grb.get(); assertTrue(response.isTimedOut()); diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index b50cd54bbbe40..93c28f63b0e8b 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.graph.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; @@ -61,13 +63,13 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.concurrent.atomic.AtomicBoolean; /** * Performs a series of elasticsearch queries and aggregations to explore * connected terms in a single index. 
*/ public class TransportGraphExploreAction extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportGraphExploreAction.class); private final ThreadPool threadPool; private final NodeClient client; @@ -115,7 +117,6 @@ class AsyncGraphAction { private final ActionListener listener; private final long startTime; - private final AtomicBoolean timedOut; private volatile ShardOperationFailedException[] shardFailures; private Map vertices = new HashMap<>(); private Map connections = new HashMap<>(); @@ -128,7 +129,6 @@ class AsyncGraphAction { this.request = request; this.listener = listener; this.startTime = threadPool.relativeTimeInMillis(); - this.timedOut = new AtomicBoolean(false); this.shardFailures = ShardSearchFailure.EMPTY_ARRAY; } @@ -173,16 +173,11 @@ private void removeVertex(Vertex vertex) { * connections */ synchronized void expand() { - if (hasTimedOut()) { - timedOut.set(true); - listener.onResponse(buildResponse()); - return; - } Map> lastHopFindings = hopFindings.get(currentHopNumber); if ((currentHopNumber >= (request.getHopNumbers() - 1)) || (lastHopFindings == null) || (lastHopFindings.size() == 0)) { // Either we gathered no leads from the last hop or we have // reached the final hop - listener.onResponse(buildResponse()); + listener.onResponse(buildResponse(false)); return; } Hop lastHop = request.getHop(currentHopNumber); @@ -318,16 +313,22 @@ synchronized void expand() { // Execute the search SearchSourceBuilder source = new SearchSourceBuilder().query(rootBool).aggregation(sampleAgg).size(0); if (request.timeout() != null) { - source.timeout(TimeValue.timeValueMillis(timeRemainingMillis())); + // Actual resolution of timer is granularity of the interval + // configured globally for updating estimated time. + long timeRemainingMillis = startTime + request.timeout().millis() - threadPool.relativeTimeInMillis(); + if (timeRemainingMillis <= 0) { + listener.onResponse(buildResponse(true)); + return; + } + + source.timeout(TimeValue.timeValueMillis(timeRemainingMillis)); } searchRequest.source(source); - // System.out.println(source); logger.trace("executing expansion graph search request"); client.search(searchRequest, new ActionListener.Delegating<>(listener) { @Override public void onResponse(SearchResponse searchResponse) { - // System.out.println(searchResponse); addShardFailures(searchResponse.getShardFailures()); ArrayList newConnections = new ArrayList(); @@ -676,7 +677,6 @@ public synchronized void start() { source.timeout(request.timeout()); } searchRequest.source(source); - // System.out.println(source); logger.trace("executing initial graph search request"); client.search(searchRequest, new ActionListener.Delegating<>(listener) { @Override @@ -774,16 +774,6 @@ private void addNormalizedBoosts(BoolQueryBuilder includesContainer, VertexReque } } - boolean hasTimedOut() { - return request.timeout() != null && (timeRemainingMillis() <= 0); - } - - long timeRemainingMillis() { - // Actual resolution of timer is granularity of the interval - // configured globally for updating estimated time. 
- return (startTime + request.timeout().millis()) - threadPool.relativeTimeInMillis(); - } - void addShardFailures(ShardOperationFailedException[] failures) { if (CollectionUtils.isEmpty(failures) == false) { ShardOperationFailedException[] duplicates = new ShardOperationFailedException[shardFailures.length + failures.length]; @@ -793,9 +783,9 @@ void addShardFailures(ShardOperationFailedException[] failures) { } } - protected GraphExploreResponse buildResponse() { + protected GraphExploreResponse buildResponse(boolean timedOut) { long took = threadPool.relativeTimeInMillis() - startTime; - return new GraphExploreResponse(took, timedOut.get(), shardFailures, vertices, connections, request.returnDetailedInfo()); + return new GraphExploreResponse(took, timedOut, shardFailures, vertices, connections, request.returnDetailedInfo()); } } From c038a91c60305a6745630ffded9b7930600e919d Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 17 Aug 2022 14:39:12 +0200 Subject: [PATCH 235/265] Assign the right path to objects merged when parsing mappings (#89389) When parsing mappings, we may find a field with same name specified twice, either because JSON duplicate keys are allowed, or because a mix of object notation and dot notation is used when providing mappings. The same can happen when applying dynamic mappings as part of parsing an incoming document, as well as when merging separate index templates that may contain the definition for the same field using a mix of object notation and dot notation. While we propagate the MapperBuilderContext across merge calls thanks to #86946, we do not propagate the right context when we call merge on objects as part of parsing/building mappings. This causes a situation in which the leaf fields that result from the merge have the wrong path, which misses the first portion e.g. sub.field instead of obj.sub.field. This commit applies the correct mapper builder context when building the object mapper builder and two objects with same name are found. 
Relates to #86946 Closes #88573 --- docs/changelog/89389.yaml | 6 ++ .../template/SimpleIndexTemplateIT.java | 87 +++++++++++++++++++ .../index/mapper/NestedObjectMapper.java | 4 +- .../index/mapper/ObjectMapper.java | 22 +++-- .../index/mapper/RootObjectMapper.java | 14 ++- .../index/mapper/DocumentParserTests.java | 56 ++++++++++++ .../index/mapper/MapperServiceTests.java | 66 ++++++++++++++ .../index/mapper/MappingParserTests.java | 63 ++++++++++++++ .../index/mapper/ObjectMapperMergeTests.java | 21 +++-- 9 files changed, 318 insertions(+), 21 deletions(-) create mode 100644 docs/changelog/89389.yaml diff --git a/docs/changelog/89389.yaml b/docs/changelog/89389.yaml new file mode 100644 index 0000000000000..e4779c6c153dc --- /dev/null +++ b/docs/changelog/89389.yaml @@ -0,0 +1,6 @@ +pr: 89389 +summary: Assign the right path to objects merged when parsing mappings +area: Mapping +type: bug +issues: + - 88573 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 85e188b30c1a5..64760ee8c154c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -14,6 +14,8 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; @@ -39,6 +41,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -1009,4 +1012,88 @@ public void testPartitionedTemplate() throws Exception { GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test_good").get(); assertEquals("6", getSettingsResponse.getIndexToSettings().get("test_good").get("index.routing_partition_size")); } + + public void testIndexTemplatesWithSameSubfield() { + client().admin() + .indices() + .preparePutTemplate("template_1") + .setPatterns(Collections.singletonList("te*")) + .setSettings(indexSettings()) + .setOrder(100) + .setMapping(""" + { + "_doc": { + "properties": { + "kwm": { + "properties": { + "source": { + "properties": { + "geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + }, + "source": { + "properties": { + "geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + } + } + """, XContentType.JSON) + .get(); + + client().admin() + .indices() + .preparePutTemplate("template_2") + .setPatterns(Collections.singletonList("test*")) + .setSettings(indexSettings()) + .setOrder(1) + .setMapping(""" + { + "_doc": { + "properties": { + "kwm.source.geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + """, XContentType.JSON) + .get(); + + client().prepareIndex("test").setSource().get(); + FieldCapabilitiesResponse fieldCapabilitiesResponse = 
client().prepareFieldCaps("test").setFields("*location").get(); + { + Map field = fieldCapabilitiesResponse.getField("kwm.source.geo.location"); + assertNotNull(field); + FieldCapabilities fieldCapabilities = field.get("geo_point"); + assertTrue(fieldCapabilities.isSearchable()); + assertTrue(fieldCapabilities.isAggregatable()); + } + { + Map field = fieldCapabilitiesResponse.getField("source.geo.location"); + assertNotNull(field); + FieldCapabilities fieldCapabilities = field.get("geo_point"); + assertTrue(fieldCapabilities.isSearchable()); + assertTrue(fieldCapabilities.isAggregatable()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 68142091cde22..6533e48d893a4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -170,7 +170,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, MapperBuilderContext mapperBuilderContext) { + public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, MapperBuilderContext parentBuilderContext) { if ((mergeWith instanceof NestedObjectMapper) == false) { throw new IllegalArgumentException("can't merge a non nested mapping [" + mergeWith.name() + "] with a nested mapping"); } @@ -191,7 +191,7 @@ public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, Ma throw new MapperException("the [include_in_root] parameter can't be updated on a nested object mapping"); } } - toMerge.doMerge(mergeWithObject, reason, mapperBuilderContext); + toMerge.doMerge(mergeWithObject, reason, parentBuilderContext); return toMerge; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 9b3786015669e..e76b29e2bbd70 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -150,6 +150,11 @@ protected final Map buildMappers(boolean root, MapperBuilderCont assert mapper instanceof ObjectMapper == false || subobjects.value() : "unexpected object while subobjects are disabled"; Mapper existing = mappers.get(mapper.simpleName()); if (existing != null) { + // The same mappings or document may hold the same field twice, either because duplicated JSON keys are allowed or + // the same field is provided using the object notation as well as the dot notation at the same time. + // This can also happen due to multiple index templates being merged into a single mappings definition using + // XContentHelper#mergeDefaults, again in case some index templates contained mappings for the same field using a + // mix of object notation and dot notation. 
mapper = existing.merge(mapper, mapperBuilderContext); } mappers.put(mapper.simpleName(), mapper); @@ -426,7 +431,11 @@ public void validate(MappingLookup mappers) { } } - public ObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { + protected MapperBuilderContext createChildContext(MapperBuilderContext mapperBuilderContext, String name) { + return mapperBuilderContext.createChildContext(name); + } + + public ObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { if ((mergeWith instanceof ObjectMapper) == false) { throw new IllegalArgumentException("can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping"); } @@ -436,12 +445,11 @@ public ObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderCon } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; ObjectMapper merged = clone(); - merged.doMerge(mergeWithObject, reason, mapperBuilderContext); + merged.doMerge(mergeWithObject, reason, parentBuilderContext); return merged; } - protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { - + protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { if (mergeWith.dynamic != null) { this.dynamic = mergeWith.dynamic; } @@ -462,6 +470,7 @@ protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperB } } + MapperBuilderContext objectBuilderContext = createChildContext(parentBuilderContext, simpleName()); Map mergedMappers = null; for (Mapper mergeWithMapper : mergeWith) { Mapper mergeIntoMapper = (mergedMappers == null ? mappers : mergedMappers).get(mergeWithMapper.simpleName()); @@ -470,8 +479,7 @@ protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperB if (mergeIntoMapper == null) { merged = mergeWithMapper; } else if (mergeIntoMapper instanceof ObjectMapper objectMapper) { - MapperBuilderContext childContext = mapperBuilderContext.createChildContext(objectMapper.simpleName()); - merged = objectMapper.merge(mergeWithMapper, reason, childContext); + merged = objectMapper.merge(mergeWithMapper, reason, objectBuilderContext); } else { assert mergeIntoMapper instanceof FieldMapper || mergeIntoMapper instanceof FieldAliasMapper; if (mergeWithMapper instanceof ObjectMapper) { @@ -485,7 +493,7 @@ protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperB if (reason == MergeReason.INDEX_TEMPLATE) { merged = mergeWithMapper; } else { - merged = mergeIntoMapper.merge(mergeWithMapper, mapperBuilderContext); + merged = mergeIntoMapper.merge(mergeWithMapper, objectBuilderContext); } } if (mergedMappers == null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index f9b4cdcecbc94..288e2a1b60aa9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -322,13 +322,19 @@ RuntimeField getRuntimeField(String name) { } @Override - public RootObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { - return (RootObjectMapper) super.merge(mergeWith, reason, mapperBuilderContext); + protected MapperBuilderContext createChildContext(MapperBuilderContext mapperBuilderContext, String name) { + assert mapperBuilderContext == 
MapperBuilderContext.ROOT; + return mapperBuilderContext; } @Override - protected void doMerge(ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { - super.doMerge(mergeWith, reason, mapperBuilderContext); + public RootObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { + return (RootObjectMapper) super.merge(mergeWith, reason, parentBuilderContext); + } + + @Override + protected void doMerge(ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { + super.doMerge(mergeWith, reason, parentBuilderContext); RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; if (mergeWithObject.numericDetection.explicit()) { this.numericDetection = mergeWithObject.numericDetection; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 6c3f50c30de35..e72add443eafc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -33,6 +33,7 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -2351,6 +2352,61 @@ public void testDocumentDescriptionInTsdb() throws IOException { } } + public void testMergeSubfieldWhileBuildingMappers() throws Exception { + MapperService mapperService = createMapperService(); + /* + We had a bug (https://github.com/elastic/elasticsearch/issues/88573) building an object mapper (ObjectMapper.Builder#buildMappers). + A sub-field that already exists is merged with the existing one. As a result, the leaf field would get the wrong field path + (missing the first portion of its path). The only way to trigger this scenario for dynamic mappings is to either allow duplicate + JSON keys or ingest the same field with dots collapsed as well as expanded within the same document. Note that the two fields with + same name need to be part of the same mappings (hence the same document). If they are in two distinct mappings they are properly + merged as part of RootObjectMapper#merge. 
+ */ + ParsedDocument doc = mapperService.documentMapper().parse(source(""" + { + "foo" : { + "bar" : { + "baz" : 1 + } + }, + "foo.bar.baz" : 2 + } + """)); + Mapping mapping = doc.dynamicMappingsUpdate(); + assertNotNull(mapping); + Mapper fooMapper = mapping.getRoot().getMapper("foo"); + assertNotNull(fooMapper); + assertTrue(fooMapper instanceof ObjectMapper); + Mapper barMapper = ((ObjectMapper) fooMapper).getMapper("bar"); + assertTrue(barMapper instanceof ObjectMapper); + Mapper baz = ((ObjectMapper) barMapper).getMapper("baz"); + assertNotNull(baz); + assertEquals("foo.bar.baz", baz.name()); + assertEquals("baz", baz.simpleName()); + IndexableField[] fields = doc.rootDoc().getFields("foo.bar.baz"); + assertEquals(4, fields.length); + long[] longs = Arrays.stream(fields).mapToLong(value -> value.numericValue().longValue()).toArray(); + assertArrayEquals(new long[] { 1, 1, 2, 2 }, longs); + + // merge without going through toXContent and reparsing, otherwise the potential leaf path issue gets fixed on its own + Mapping newMapping = MapperService.mergeMappings(mapperService.documentMapper(), mapping, MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper newDocMapper = new DocumentMapper(mapperService.documentParser(), newMapping, newMapping.toCompressedXContent()); + ParsedDocument doc2 = newDocMapper.parse(source(""" + { + "foo" : { + "bar" : { + "baz" : 10 + } + } + } + """)); + assertNull(doc2.dynamicMappingsUpdate()); + IndexableField[] fields2 = doc2.rootDoc().getFields("foo.bar.baz"); + assertEquals(2, fields2.length); + long[] longs2 = Arrays.stream(fields2).mapToLong(value -> value.numericValue().longValue()).toArray(); + assertArrayEquals(new long[] { 10, 10 }, longs2); + } + /** * Mapper plugin providing a mock metadata field mapper implementation that supports setting its value */ diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 38d7567ce40e3..5cefbfeadfeb2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -8,8 +8,10 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.IndexableField; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -360,4 +362,68 @@ public void testMultiFieldChecks() throws IOException { assertFalse(mapperService.isMultiField("object.subfield1")); } + public void testMergeObjectSubfieldWhileParsing() throws IOException { + /* + If we are parsing mappings that hold the definition of the same field twice, the two are merged together. This can happen when + mappings have the same field specified using the object notation as well as the dot notation, as well as when applying index + templates, in which case the two definitions may come from separate index templates that end up in the same map (through + XContentHelper#mergeDefaults, see MetadataCreateIndexService#parseV1Mappings). + We had a bug (https://github.com/elastic/elasticsearch/issues/88573) triggered by this scenario that caused the merged leaf fields + to get the wrong path (missing the first portion). 
+ */ + MapperService mapperService = createMapperService(""" + { + "_doc": { + "properties": { + "obj": { + "properties": { + "sub": { + "properties": { + "string": { + "type": "keyword" + } + } + } + } + }, + "obj.sub.string" : { + "type" : "keyword" + } + } + } + } + """); + + assertNotNull(mapperService.mappingLookup().getMapper("obj.sub.string")); + MappedFieldType fieldType = mapperService.mappingLookup().getFieldType("obj.sub.string"); + assertNotNull(fieldType); + assertEquals(""" + { + "_doc" : { + "properties" : { + "obj" : { + "properties" : { + "sub" : { + "properties" : { + "string" : { + "type" : "keyword" + } + } + } + } + } + } + } + }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + + // check that with the resulting mappings a new document has the previously merged field indexed properly + ParsedDocument parsedDocument = mapperService.documentMapper().parse(source(""" + { + "obj.sub.string" : "value" + }""")); + + assertNull(parsedDocument.dynamicMappingsUpdate()); + IndexableField[] fields = parsedDocument.rootDoc().getFields("obj.sub.string"); + assertEquals(2, fields.length); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 3ea480b97c24e..2e0c07940a562 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -143,4 +143,67 @@ public void testBadMetadataMapper() throws IOException { ); assertEquals("[_routing] config must be an object", e.getMessage()); } + + public void testMergeSubfieldWhileParsing() throws Exception { + /* + If we are parsing mappings that hold the definition of the same field twice, the two are merged together. This can happen when + mappings have the same field specified using the object notation as well as the dot notation, as well as when applying index + templates, in which case the two definitions may come from separate index templates that end up in the same map (through + XContentHelper#mergeDefaults, see MetadataCreateIndexService#parseV1Mappings). + We had a bug (https://github.com/elastic/elasticsearch/issues/88573) triggered by this scenario that caused the merged leaf fields + to get the wrong path (missing the first portion). 
+ */ + String mappingAsString = """ + { + "_doc": { + "properties": { + "obj": { + "properties": { + "source": { + "properties": { + "geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + }, + "obj.source.geo.location" : { + "type": "geo_point" + } + } + } + } + """; + Mapping mapping = createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(mappingAsString)); + assertEquals(1, mapping.getRoot().mappers.size()); + Mapper object = mapping.getRoot().getMapper("obj"); + assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); + assertEquals("obj", object.simpleName()); + assertEquals("obj", object.name()); + ObjectMapper objectMapper = (ObjectMapper) object; + assertEquals(1, objectMapper.mappers.size()); + object = objectMapper.getMapper("source"); + assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); + assertEquals("source", object.simpleName()); + assertEquals("obj.source", object.name()); + objectMapper = (ObjectMapper) object; + assertEquals(1, objectMapper.mappers.size()); + object = objectMapper.getMapper("geo"); + assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); + assertEquals("geo", object.simpleName()); + assertEquals("obj.source.geo", object.name()); + objectMapper = (ObjectMapper) object; + assertEquals(1, objectMapper.mappers.size()); + Mapper location = objectMapper.getMapper("location"); + assertThat(location, CoreMatchers.instanceOf(GeoPointFieldMapper.class)); + GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) location; + assertEquals("obj.source.geo.location", geoPointFieldMapper.name()); + assertEquals("location", geoPointFieldMapper.simpleName()); + assertEquals("obj.source.geo.location", geoPointFieldMapper.mappedFieldType.name()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 8627cd4c16598..d762cbd3d8c28 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -17,13 +17,9 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.notNullValue; public class ObjectMapperMergeTests extends ESTestCase { - private final FieldMapper barFieldMapper = createTextFieldMapper("bar"); - private final FieldMapper bazFieldMapper = createTextFieldMapper("baz"); - private final RootObjectMapper rootObjectMapper = createMapping(false, true, true, false); private RootObjectMapper createMapping( @@ -35,10 +31,13 @@ private RootObjectMapper createMapping( Map mappers = new HashMap<>(); mappers.put("disabled", createObjectMapper("disabled", disabledFieldEnabled, emptyMap())); Map fooMappers = new HashMap<>(); + MapperBuilderContext fooBuilderContext = MapperBuilderContext.ROOT.createChildContext("foo"); if (includeBarField) { + FieldMapper barFieldMapper = createTextFieldMapper("bar", fooBuilderContext); fooMappers.put("bar", barFieldMapper); } if (includeBazField) { + FieldMapper bazFieldMapper = createTextFieldMapper("baz", fooBuilderContext); fooMappers.put("baz", bazFieldMapper); } mappers.put("foo", createObjectMapper("foo", fooFieldEnabled, Collections.unmodifiableMap(fooMappers))); @@ -54,8 +53,14 @@ public void testMerge() { // THEN "baz" new field is added to merged mapping final ObjectMapper mergedFoo = (ObjectMapper) 
merged.getMapper("foo"); - assertThat(mergedFoo.getMapper("bar"), notNullValue()); - assertThat(mergedFoo.getMapper("baz"), notNullValue()); + { + Mapper bar = mergedFoo.getMapper("bar"); + assertEquals("bar", bar.simpleName()); + assertEquals("foo.bar", bar.name()); + Mapper baz = mergedFoo.getMapper("baz"); + assertEquals("baz", baz.simpleName()); + assertEquals("foo.baz", baz.name()); + } } public void testMergeWhenDisablingField() { @@ -263,8 +268,8 @@ private ObjectMapper createObjectSubobjectsFalseLeafWithMultiField() { .build(MapperBuilderContext.ROOT); } - private TextFieldMapper createTextFieldMapper(String name) { - return new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()).build(MapperBuilderContext.ROOT); + private TextFieldMapper createTextFieldMapper(String name, MapperBuilderContext mapperBuilderContext) { + return new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()).build(mapperBuilderContext); } private TextFieldMapper createTextKeywordMultiField(String name, MapperBuilderContext mapperBuilderContext) { From 695d1a84af37e58cf01aa0abca021cfbd710678e Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 17 Aug 2022 14:46:20 +0200 Subject: [PATCH 236/265] Remove root argument from buildMappers method (#89390) The callers of buildMappers can provide the right context, instead of passing a boolean argument that controls what context is used. --- .../index/mapper/NestedObjectMapper.java | 2 +- .../org/elasticsearch/index/mapper/ObjectMapper.java | 12 +++++++++--- .../elasticsearch/index/mapper/RootObjectMapper.java | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 6533e48d893a4..45f1363fb1a36 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -49,7 +49,7 @@ Builder includeInParent(boolean includeInParent) { @Override public NestedObjectMapper build(MapperBuilderContext context) { - return new NestedObjectMapper(name, context.buildFullName(name), buildMappers(false, context), this); + return new NestedObjectMapper(name, context.buildFullName(name), buildMappers(context.createChildContext(name)), this); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index e76b29e2bbd70..636cd3628beeb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -142,8 +142,7 @@ private static ObjectMapper.Builder findObjectBuilder(String fullName, DocumentP throw new IllegalStateException("Missing intermediate object " + fullName); } - protected final Map buildMappers(boolean root, MapperBuilderContext context) { - MapperBuilderContext mapperBuilderContext = root ? 
context : context.createChildContext(name); + protected final Map buildMappers(MapperBuilderContext mapperBuilderContext) { Map mappers = new HashMap<>(); for (Mapper.Builder builder : mappersBuilders) { Mapper mapper = builder.build(mapperBuilderContext); @@ -164,7 +163,14 @@ protected final Map buildMappers(boolean root, MapperBuilderCont @Override public ObjectMapper build(MapperBuilderContext context) { - return new ObjectMapper(name, context.buildFullName(name), enabled, subobjects, dynamic, buildMappers(false, context)); + return new ObjectMapper( + name, + context.buildFullName(name), + enabled, + subobjects, + dynamic, + buildMappers(context.createChildContext(name)) + ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 288e2a1b60aa9..b0c042fe3275e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -107,7 +107,7 @@ public RootObjectMapper build(MapperBuilderContext context) { enabled, subobjects, dynamic, - buildMappers(true, context), + buildMappers(context), runtimeFields, dynamicDateTimeFormatters, dynamicTemplates, From 837a8d7a6e434ac3d34f4aa534eb4b5b602fe254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Wed, 17 Aug 2022 15:00:39 +0200 Subject: [PATCH 237/265] Add support for floating point node.processors setting (#89281) This commit adds support for floating point node.processors setting. This is useful when the nodes run in an environment where the CPU time assigned to the ES node process is limited (i.e. using cgroups). With this change, the system would be able to size the thread pools accordingly, in this case it would round up the provided setting to the closest integer. --- docs/reference/modules/threadpool.asciidoc | 4 +- .../netty4/Netty4HttpServerTransport.java | 2 +- .../transport/netty4/Netty4Transport.java | 2 +- .../transport/netty4/Netty4Utils.java | 2 +- .../common/util/concurrent/EsExecutors.java | 30 ++++++++-- .../util/concurrent/EsExecutorsTests.java | 60 ++++++++++++++++++- 6 files changed, 87 insertions(+), 13 deletions(-) diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index e0b99ddf15630..b6eb62836378e 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -177,7 +177,9 @@ thread_pool: The number of processors is automatically detected, and the thread pool settings are automatically set based on it. In some cases it can be useful to override the number of detected processors. This can be done by explicitly setting the -`node.processors` setting. +`node.processors` setting. This setting accepts floating point numbers, this +can be useful in environments where the Elasticsearch nodes are configured +to run with CPU limits, such as cpu shares or quota under `Cgroups`. 
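+
+For example, a node that is limited to one and a half CPUs can be given a
+fractional value, which is rounded up to a whole number of processors when
+the thread pool sizes are computed:
+
+[source,yaml]
+--------------------------------------------------
+node.processors: 1.5
+--------------------------------------------------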
[source,yaml] -------------------------------------------------- diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 5f49e2505cbf6..a0e4156b83c84 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -158,7 +158,7 @@ public Netty4HttpServerTransport( clusterSettings, tracer ); - Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings)); + Netty4Utils.setAvailableProcessors(EsExecutors.allocatedProcessors(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 0241669d15b8e..563dc8c77ac30 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -107,7 +107,7 @@ public Netty4Transport( SharedGroupFactory sharedGroupFactory ) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); - Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings)); + Netty4Utils.setAvailableProcessors(EsExecutors.allocatedProcessors(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 85f6fcf93aee3..2dae8bc9258fe 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -114,7 +114,7 @@ public static BytesReference toBytesReference(final ByteBuf buffer) { public static Recycler createRecycler(Settings settings) { // If this method is called by super ctor the processors will not be set. Accessing NettyAllocator initializes netty's internals // setting the processors. We must do it ourselves first just in case. - setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings)); + setAvailableProcessors(EsExecutors.allocatedProcessors(settings)); return NettyAllocator.getRecycler(); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 6b881414de760..dd696f7b6b4f1 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -37,13 +37,31 @@ public class EsExecutors { /** * Setting to manually control the number of allocated processors. This setting is used to adjust thread pool sizes per node. The * default value is {@link Runtime#availableProcessors()} but should be manually controlled if not all processors on the machine are - * available to Elasticsearch (e.g., because of CPU limits). 
+ * available to Elasticsearch (e.g., because of CPU limits). Note that this setting accepts floating point processors. + * If a rounded number is needed, always use {@link EsExecutors#allocatedProcessors(Settings)}. */ - public static final Setting NODE_PROCESSORS_SETTING = Setting.intSetting( + public static final Setting NODE_PROCESSORS_SETTING = new Setting<>( "node.processors", - Runtime.getRuntime().availableProcessors(), - 1, - Runtime.getRuntime().availableProcessors(), + Double.toString(Runtime.getRuntime().availableProcessors()), + textValue -> { + double numberOfProcessors = Double.parseDouble(textValue); + if (Double.isNaN(numberOfProcessors) || Double.isInfinite(numberOfProcessors)) { + String err = "Failed to parse value [" + textValue + "] for setting [node.processors]"; + throw new IllegalArgumentException(err); + } + + if (numberOfProcessors <= 0.0) { + String err = "Failed to parse value [" + textValue + "] for setting [node.processors] must be > 0"; + throw new IllegalArgumentException(err); + } + + final int maxNumberOfProcessors = Runtime.getRuntime().availableProcessors(); + if (numberOfProcessors > maxNumberOfProcessors) { + String err = "Failed to parse value [" + textValue + "] for setting [node.processors] must be <= " + maxNumberOfProcessors; + throw new IllegalArgumentException(err); + } + return numberOfProcessors; + }, Property.NodeScope ); @@ -55,7 +73,7 @@ public class EsExecutors { * @return the number of allocated processors */ public static int allocatedProcessors(final Settings settings) { - return NODE_PROCESSORS_SETTING.get(settings); + return (int) Math.ceil(NODE_PROCESSORS_SETTING.get(settings)); } public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing( diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index 0807addcb86f6..15ac647a2a256 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -25,6 +25,7 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; /** @@ -437,14 +438,14 @@ public void testGetTasks() throws InterruptedException { } public void testNodeProcessorsBound() { - final Setting processorsSetting = EsExecutors.NODE_PROCESSORS_SETTING; + final Setting processorsSetting = EsExecutors.NODE_PROCESSORS_SETTING; final int available = Runtime.getRuntime().availableProcessors(); - final int processors = randomIntBetween(available + 1, Integer.MAX_VALUE); + final double processors = randomDoubleBetween(available + Math.ulp(available), Float.MAX_VALUE, true); final Settings settings = Settings.builder().put(processorsSetting.getKey(), processors).build(); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processorsSetting.get(settings)); final String expected = String.format( Locale.ROOT, - "Failed to parse value [%d] for setting [%s] must be <= %d", + "Failed to parse value [%s] for setting [%s] must be <= %d", processors, processorsSetting.getKey(), available @@ -452,4 +453,57 @@ public void testNodeProcessorsBound() { assertThat(e, hasToString(containsString(expected))); } + public void testNodeProcessorsIsRoundedUpWhenUsingFloats() { + assertThat( + 
EsExecutors.allocatedProcessors(Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), Double.MIN_VALUE).build()), + is(equalTo(1)) + ); + + assertThat( + EsExecutors.allocatedProcessors(Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 0.2).build()), + is(equalTo(1)) + ); + + assertThat( + EsExecutors.allocatedProcessors(Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1.2).build()), + is(equalTo(2)) + ); + + assertThat( + EsExecutors.allocatedProcessors( + Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), Runtime.getRuntime().availableProcessors()).build() + ), + is(equalTo(Runtime.getRuntime().availableProcessors())) + ); + } + + public void testNodeProcessorsFloatValidation() { + final Setting processorsSetting = EsExecutors.NODE_PROCESSORS_SETTING; + + { + final Settings settings = Settings.builder().put(processorsSetting.getKey(), 0.0).build(); + expectThrows(IllegalArgumentException.class, () -> processorsSetting.get(settings)); + } + + { + final Settings settings = Settings.builder().put(processorsSetting.getKey(), Double.NaN).build(); + expectThrows(IllegalArgumentException.class, () -> processorsSetting.get(settings)); + } + + { + final Settings settings = Settings.builder().put(processorsSetting.getKey(), Double.POSITIVE_INFINITY).build(); + expectThrows(IllegalArgumentException.class, () -> processorsSetting.get(settings)); + } + + { + final Settings settings = Settings.builder().put(processorsSetting.getKey(), Double.NEGATIVE_INFINITY).build(); + expectThrows(IllegalArgumentException.class, () -> processorsSetting.get(settings)); + } + + { + final Settings settings = Settings.builder().put(processorsSetting.getKey(), -1.5).build(); + expectThrows(IllegalArgumentException.class, () -> processorsSetting.get(settings)); + } + } + } From 2a08258224dbdab4e375a17f7a4393395b481f0c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 17 Aug 2022 15:13:01 +0200 Subject: [PATCH 238/265] Fix BlobStoreIncrementalityIT.testRecordCorrectSegmentCountsWithBackgroundMerges (#89416) Create more segments here to make sure the background merge always merges. With just 3 segments and a max-segments-per-tier of 2 we don't have the guarantee that a merge will actually run and hence the test will fail when waiting for the background merge. 
closes #89412 --- .../org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java index 4c278aeddb5bf..e3fdb92785503 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java @@ -197,7 +197,7 @@ public void testRecordCorrectSegmentCountsWithBackgroundMerges() throws Exceptio // create a situation where we temporarily have a bunch of segments until the merges can catch up long id = 0; - final int rounds = scaledRandomIntBetween(3, 5); + final int rounds = scaledRandomIntBetween(5, 9); for (int i = 0; i < rounds; ++i) { final int numDocs = scaledRandomIntBetween(100, 1000); BulkRequestBuilder request = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); From 79a89790e3e88d9e1f7f6b6287412c71ca2412db Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 17 Aug 2022 10:18:36 -0400 Subject: [PATCH 239/265] Synthetic source: load text from stored fields (#87480) Adds support for loading `text` and `keyword` fields that have `store: true`. We could likely load *any* stored fields, but I wanted to blaze the trail using something fairly useful. --- .../mapper/extras/ScaledFloatFieldMapper.java | 3 +- .../test/60_synthetic_source.yml | 2 +- .../test/get/100_synthetic_source.yml | 76 ++++++ .../test/mget/90_synthetic_source.yml | 79 +++++- .../test/search/400_synthetic_source.yml | 117 +++++++++ .../test/update/100_synthetic_source.yml | 60 +++++ .../index/get/ShardGetService.java | 30 ++- .../index/mapper/BooleanFieldMapper.java | 2 +- .../index/mapper/DateFieldMapper.java | 2 +- .../index/mapper/GeoPointFieldMapper.java | 2 +- .../index/mapper/IpFieldMapper.java | 2 +- .../index/mapper/KeywordFieldMapper.java | 193 +------------- .../index/mapper/NumberFieldMapper.java | 148 +---------- .../index/mapper/ObjectMapper.java | 100 +++++--- ...dNumericDocValuesSyntheticFieldLoader.java | 202 +++++++++++++++ ...ortedSetDocValuesSyntheticFieldLoader.java | 237 ++++++++++++++++++ .../index/mapper/SourceLoader.java | 117 +++++++-- .../mapper/StringStoredFieldFieldLoader.java | 64 +++++ .../index/mapper/TextFieldMapper.java | 13 +- .../search/fetch/FetchPhase.java | 17 +- .../index/mapper/KeywordFieldMapperTests.java | 25 +- .../index/mapper/SourceLoaderTests.java | 4 +- .../index/mapper/TextFieldMapperTests.java | 20 +- .../index/mapper/MapperServiceTestCase.java | 31 ++- .../index/mapper/MapperTestCase.java | 12 +- .../AggregateDoubleMetricFieldMapper.java | 68 +++-- .../mapper/ConstantKeywordFieldMapper.java | 22 +- 27 files changed, 1162 insertions(+), 486 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index 84c51fe0ab6c1..06bac2ec8b1e1 100644 --- 
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.SimpleMappedFieldType; +import org.elasticsearch.index.mapper.SortedNumericDocValuesSyntheticFieldLoader; import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; @@ -705,7 +706,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new NumberFieldMapper.NumericSyntheticFieldLoader(name(), simpleName()) { + return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName()) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(decodeForSyntheticSource(value, scalingFactor)); diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml index 5a86fa2074675..55d39940081bc 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/60_synthetic_source.yml @@ -10,7 +10,7 @@ unsupported: body: mappings: _source: - synthetic: true + mode: synthetic properties: join_field: type: join diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index e87a727de94bd..5bcde58ea36d3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -167,3 +167,79 @@ force_synthetic_source_bad_mapping: index: test id: 1 force_synthetic_source: true + +--- +stored text: + - skip: + version: " - 8.4.99" + reason: introduced in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + text: + type: text + store: true + + - do: + index: + index: test + id: 1 + refresh: true + body: + text: the quick brown fox + + - do: + get: + index: test + id: 1 + - match: {_index: "test"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + text: the quick brown fox + +--- +stored keyword: + - skip: + version: " - 8.4.99" + reason: introduced in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + kwd: + type: keyword + store: true + + - do: + index: + index: test + id: 1 + refresh: true + body: + kwd: the quick brown fox + + - do: + get: + index: test + id: 1 + - match: {_index: "test"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + kwd: the quick brown fox diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index e7cde7fa1a7cf..327aa2d0fa4d2 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -46,6 +46,55 @@ keyword: docs.1._source: kwd: bar +--- +stored text: + - skip: + version: " - 8.4.99" + reason: introduced in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + text: + type: text + store: true + + - do: + index: + index: test + id: 1 + body: + text: the quick brown fox + + - do: + index: + index: test + id: 2 + body: + text: jumped over the lazy dog + + - do: + mget: + index: test + body: + ids: [1, 2] + - match: {docs.0._index: "test"} + - match: {docs.0._id: "1"} + - match: + docs.0._source: + text: the quick brown fox + + - match: {docs.1._index: "test"} + - match: {docs.1._id: "2"} + - match: + docs.1._source: + text: jumped over the lazy dog + --- force_synthetic_source_ok: - skip: @@ -60,22 +109,25 @@ force_synthetic_source_ok: _source: mode: stored properties: - kwd: - type: keyword + obj: + properties: + kwd: + type: keyword - do: index: index: test id: 1 body: - kwd: foo + obj.kwd: foo - do: index: index: test id: 2 body: - kwd: bar + obj: + kwd: bar # When _source is used in the fetch the original _source is perfect - do: @@ -85,10 +137,11 @@ force_synthetic_source_ok: ids: [1, 2] - match: docs.0._source: - kwd: foo + obj.kwd: foo - match: docs.1._source: - kwd: bar + obj: + kwd: bar # When we force synthetic source dots in field names get turned into objects - do: @@ -99,16 +152,18 @@ force_synthetic_source_ok: ids: [ 1, 2 ] - match: docs.0._source: - kwd: foo + obj: + kwd: foo - match: docs.1._source: - kwd: bar + obj: + kwd: bar --- force_synthetic_source_bad_mapping: - skip: - version: " - 8.3.99" - reason: introduced in 8.4.0 + version: " - 8.4.99" + reason: message changed in 8.5 - do: indices.create: @@ -157,5 +212,5 @@ force_synthetic_source_bad_mapping: force_synthetic_source: true body: ids: [ 1, 2 ] - - match: {docs.0.error.reason: "field [text] of type [text] doesn't support synthetic source unless it has a sub-field of type [keyword] with doc values enabled and without ignore_above or a normalizer"} - - match: {docs.1.error.reason: "field [text] of type [text] doesn't support synthetic source unless it has a sub-field of type [keyword] with doc values enabled and without ignore_above or a normalizer"} + - match: {docs.0.error.reason: "field [text] of type [text] doesn't support synthetic source unless it is stored or has a sub-field of type [keyword] with doc values or stored and without ignore_above or a normalizer"} + - match: {docs.1.error.reason: "field [text] of type [text] doesn't support synthetic source unless it is stored or has a sub-field of type [keyword] with doc values or stored and without ignore_above or a normalizer"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml index b95fc62d24ffd..55351969dbdcd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml @@ -33,6 +33,123 @@ keyword: hits.hits.0._source: kwd: foo +--- +stored text: + - skip: + version: " - 8.4.99" + reason: introduced in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: 
+ text: + type: text + store: true + + - do: + index: + index: test + id: 1 + refresh: true + body: + text: the quick brown fox + + - do: + search: + index: test + body: + query: + ids: + values: [1] + - match: + hits.hits.0._source: + text: the quick brown fox + +--- +stored keyword: + - skip: + version: " - 8.4.99" + reason: introduced in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + kwd: + type: keyword + store: true + + - do: + index: + index: test + id: 1 + refresh: true + body: + kwd: the quick brown fox + + - do: + search: + index: test + body: + query: + ids: + values: [1] + - match: + hits.hits.0._source: + kwd: the quick brown fox + +--- +stored keyword without sibling fields: + - skip: + version: " - 8.4.99" + reason: introduced in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + kwd: + type: keyword + store: true + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"kwd": "the quick brown fox", "s": 1, "n": 1}' + - '{"index": {}}' + - '{"kwd": "jumped over the lazy dog", "s": 2}' + + - do: + search: + index: test + body: + sort: s + - match: + hits.hits.0._source: + kwd: the quick brown fox + s: 1 + n: 1 + - match: + hits.hits.1._source: + kwd: jumped over the lazy dog + s: 2 + --- force_synthetic_source_ok: - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml index 6c8e32374884d..2ad71d3b6ed55 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/100_synthetic_source.yml @@ -54,3 +54,63 @@ keyword: run_expensive_tasks: true - is_false: test.fields._source - is_true: test.fields._recovery_source + +--- +stored text: + - skip: + version: " - 8.4.99" + reason: introduced in 8.5.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + text: + type: text + store: true + text2: + type: text + store: true + + - do: + index: + index: test + id: 1 + refresh: true + body: + text: the quick brown fox + + - do: + update: + index: test + id: 1 + body: + doc_as_upsert: true + doc: + text2: jumped over the lazy dog + - match: {result: updated} + + - do: + get: + index: test + id: 1 + - match: {_index: "test"} + - match: {_id: "1"} + - match: {_version: 2} + - match: {found: true} + - match: + _source: + text: the quick brown fox + text2: jumped over the lazy dog + + # Make sure there isn't any _source stored field + - do: + indices.disk_usage: + index: test + run_expensive_tasks: true + - is_false: test.fields._source + - is_true: test.fields._recovery_source diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index df7d454ace53a..c1d3bd06c08ed 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; -import org.elasticsearch.common.util.set.Sets; import 
org.elasticsearch.common.xcontent.XContentFieldFilter; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; @@ -34,9 +33,12 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -247,17 +249,16 @@ private GetResult innerGetFetch( Map metadataFields = null; BytesReference source = null; DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); - FieldsVisitor fieldVisitor = buildFieldsVisitors(storedFields, fetchSourceContext); + SourceLoader loader = forceSyntheticSource + ? new SourceLoader.Synthetic(mappingLookup.getMapping()) + : mappingLookup.newSourceLoader(); + FieldsVisitor fieldVisitor = buildFieldsVisitors(storedFields, fetchSourceContext, loader); if (fieldVisitor != null) { try { docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor); } catch (IOException e) { throw new ElasticsearchException("Failed to get id [" + id + "]", e); } - SourceLoader loader = forceSyntheticSource - ? new SourceLoader.Synthetic(mappingLookup.getMapping()) - : mappingLookup.newSourceLoader(); - source = loader.leaf(docIdAndVersion.reader, new int[] { docIdAndVersion.docId }).source(fieldVisitor, docIdAndVersion.docId); // put stored fields into result objects if (fieldVisitor.fields().isEmpty() == false) { @@ -272,6 +273,7 @@ private GetResult innerGetFetch( } } } + source = loader.leaf(docIdAndVersion.reader, new int[] { docIdAndVersion.docId }).source(fieldVisitor, docIdAndVersion.docId); } if (source != null) { @@ -301,11 +303,19 @@ private GetResult innerGetFetch( ); } - private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceContext fetchSourceContext) { - if (fields == null || fields.length == 0) { + private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceContext fetchSourceContext, SourceLoader loader) { + if (fields != null && fields.length > 0) { + Set fieldsToLoad = new HashSet<>(); + Collections.addAll(fieldsToLoad, fields); + if (fetchSourceContext.fetchSource()) { + fieldsToLoad.addAll(loader.requiredStoredFields()); + } + return new CustomFieldsVisitor(fieldsToLoad, fetchSourceContext.fetchSource()); + } + Set sourceFields = fetchSourceContext.fetchSource() ? loader.requiredStoredFields() : Set.of(); + if (sourceFields.isEmpty()) { return fetchSourceContext.fetchSource() ? 
new FieldsVisitor(true) : null; } - - return new CustomFieldsVisitor(Sets.newHashSet(fields), fetchSourceContext.fetchSource()); + return new CustomFieldsVisitor(sourceFields, fetchSourceContext.fetchSource()); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 8f085195dae0c..11e9843ee243f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -466,7 +466,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new NumberFieldMapper.NumericSyntheticFieldLoader(name(), simpleName()) { + return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName()) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(value == 1); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 22ef8c1bc3b20..c3728b8205025 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -915,7 +915,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new NumberFieldMapper.NumericSyntheticFieldLoader(name(), simpleName()) { + return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName()) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(fieldType().format(value, fieldType().dateTimeFormatter())); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 2951162275399..0143bf952e9dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -497,7 +497,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new NumberFieldMapper.NumericSyntheticFieldLoader(name(), simpleName()) { + return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName()) { final GeoPoint point = new GeoPoint(); @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index c39e02b1d7012..21d9c0fe8fbc8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -553,7 +553,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new KeywordFieldMapper.BytesSyntheticFieldLoader(name(), simpleName()) { + return new SortedSetDocValuesSyntheticFieldLoader(name(), simpleName()) { @Override protected BytesRef convert(BytesRef value) { byte[] bytes = 
Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 8c4f5649f7915..a1386624d4239 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -15,15 +15,12 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedSetDocValuesField; -import org.apache.lucene.index.DocValues; import org.apache.lucene.index.FilteredTermsEnum; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.ReaderSlice; -import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -65,7 +62,6 @@ import org.elasticsearch.search.runtime.StringScriptFieldRegexpQuery; import org.elasticsearch.search.runtime.StringScriptFieldTermQuery; import org.elasticsearch.search.runtime.StringScriptFieldWildcardQuery; -import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.io.UncheckedIOException; @@ -1059,11 +1055,6 @@ protected SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String simpleNa if (hasScript()) { return SourceLoader.SyntheticFieldLoader.NOTHING; } - if (hasDocValues == false) { - throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" - ); - } if (fieldType().ignoreAbove() != Defaults.IGNORE_ABOVE) { throw new IllegalArgumentException( "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares ignore_above" @@ -1079,7 +1070,19 @@ protected SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String simpleNa "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares a normalizer" ); } - return new BytesSyntheticFieldLoader(name(), simpleName) { + if (fieldType.stored()) { + return new StringStoredFieldFieldLoader(name(), simpleName); + } + if (hasDocValues == false) { + throw new IllegalArgumentException( + "field [" + + name() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values and isn't stored" + ); + } + return new SortedSetDocValuesSyntheticFieldLoader(name(), simpleName) { @Override protected BytesRef convert(BytesRef value) { return value; @@ -1093,174 +1096,4 @@ protected BytesRef preserve(BytesRef value) { }; } - public abstract static class BytesSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { - private final String name; - private final String simpleName; - - public BytesSyntheticFieldLoader(String name, String simpleName) { - this.name = name; - this.simpleName = simpleName; - } - - @Override - public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { - SortedSetDocValues dv = DocValues.getSortedSet(reader, name); - if (dv.getValueCount() == 0) { - return SourceLoader.SyntheticFieldLoader.NOTHING_LEAF; - } - if (docIdsInLeaf.length == 1) { - /* - * The singleton optimization is mostly about 
looking up ordinals - * in sorted order and doesn't buy anything if there is only a single - * document. - */ - return new ImmediateLeaf(dv); - } - SortedDocValues singleton = DocValues.unwrapSingleton(dv); - if (singleton != null) { - return singletonLeaf(singleton, docIdsInLeaf); - } - return new ImmediateLeaf(dv); - } - - /** - * Load all ordinals for all docs up front and resolve to their string - * values in order. This should be much more disk-friendly than - * {@link ImmediateLeaf} because it resolves the ordinals in order and - * marginally more cpu friendly because it resolves the ordinals one time. - */ - private Leaf singletonLeaf(SortedDocValues singleton, int[] docIdsInLeaf) throws IOException { - int[] ords = new int[docIdsInLeaf.length]; - int found = 0; - for (int d = 0; d < docIdsInLeaf.length; d++) { - if (false == singleton.advanceExact(docIdsInLeaf[d])) { - ords[d] = -1; - continue; - } - ords[d] = singleton.ordValue(); - found++; - } - if (found == 0) { - return SourceLoader.SyntheticFieldLoader.NOTHING_LEAF; - } - int[] sortedOrds = ords.clone(); - Arrays.sort(sortedOrds); - int unique = 0; - int prev = -1; - for (int ord : sortedOrds) { - if (ord != prev) { - prev = ord; - unique++; - } - } - int[] uniqueOrds = new int[unique]; - BytesRef[] converted = new BytesRef[unique]; - unique = 0; - prev = -1; - for (int ord : sortedOrds) { - if (ord != prev) { - prev = ord; - uniqueOrds[unique] = ord; - converted[unique] = preserve(convert(singleton.lookupOrd(ord))); - unique++; - } - } - logger.debug("loading [{}] on [{}] docs covering [{}] ords", name, docIdsInLeaf.length, uniqueOrds.length); - return new SourceLoader.SyntheticFieldLoader.Leaf() { - private int idx = -1; - - @Override - public boolean empty() { - return false; - } - - @Override - public boolean advanceToDoc(int docId) throws IOException { - idx++; - if (docIdsInLeaf[idx] != docId) { - throw new IllegalArgumentException( - "expected to be called with [" + docIdsInLeaf[idx] + "] but was called with " + docId + " instead" - ); - } - return ords[idx] >= 0; - } - - @Override - public void write(XContentBuilder b) throws IOException { - if (ords[idx] < 0) { - return; - } - int convertedIdx = Arrays.binarySearch(uniqueOrds, ords[idx]); - if (convertedIdx < 0) { - throw new IllegalStateException( - "received unexpected ord [" + ords[idx] + "]. Expected " + Arrays.toString(uniqueOrds) - ); - } - BytesRef c = converted[convertedIdx]; - b.field(simpleName).utf8Value(c.bytes, c.offset, c.length); - } - }; - } - - /** - * Load ordinals in line with populating the doc and immediately - * convert from ordinals into {@link BytesRef}s. 
- */ - private class ImmediateLeaf implements Leaf { - private final SortedSetDocValues dv; - private boolean hasValue; - - ImmediateLeaf(SortedSetDocValues dv) { - this.dv = dv; - } - - @Override - public boolean empty() { - return false; - } - - @Override - public boolean advanceToDoc(int docId) throws IOException { - return hasValue = dv.advanceExact(docId); - } - - @Override - public void write(XContentBuilder b) throws IOException { - if (false == hasValue) { - return; - } - long first = dv.nextOrd(); - long next = dv.nextOrd(); - if (next == SortedSetDocValues.NO_MORE_ORDS) { - BytesRef c = convert(dv.lookupOrd(first)); - b.field(simpleName).utf8Value(c.bytes, c.offset, c.length); - return; - } - b.startArray(simpleName); - BytesRef c = convert(dv.lookupOrd(first)); - b.utf8Value(c.bytes, c.offset, c.length); - c = convert(dv.lookupOrd(next)); - b.utf8Value(c.bytes, c.offset, c.length); - while ((next = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) { - c = convert(dv.lookupOrd(next)); - b.utf8Value(c.bytes, c.offset, c.length); - } - b.endArray(); - } - } - - /** - * Convert a {@link BytesRef} read from the source into bytes to write - * to the xcontent. This shouldn't make a deep copy if the conversion - * process itself doesn't require one. - */ - protected abstract BytesRef convert(BytesRef value); - - /** - * Preserves {@link BytesRef bytes} returned by {@link #convert} - * to by written later. This should make a - * {@link BytesRef#deepCopyOf deep copy} if {@link #convert} didn't. - */ - protected abstract BytesRef preserve(BytesRef value); - } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 1210c23880a64..bc7ac13249876 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -14,11 +14,7 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.StoredField; -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.sandbox.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; @@ -396,7 +392,7 @@ private static void validateParsed(float value) { @Override SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String fieldSimpleName) { - return new NumericSyntheticFieldLoader(fieldName, fieldSimpleName) { + return new SortedNumericDocValuesSyntheticFieldLoader(fieldName, fieldSimpleName) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(HalfFloatPoint.sortableShortToHalfFloat((short) value)); @@ -546,7 +542,7 @@ private static void validateParsed(float value) { @Override SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String fieldSimpleName) { - return new NumericSyntheticFieldLoader(fieldName, fieldSimpleName) { + return new SortedNumericDocValuesSyntheticFieldLoader(fieldName, fieldSimpleName) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(NumericUtils.sortableIntToFloat((int) value)); @@ -674,7 +670,7 
@@ private static void validateParsed(double value) { @Override SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String fieldSimpleName) { - return new NumericSyntheticFieldLoader(fieldName, fieldSimpleName) { + return new SortedNumericDocValuesSyntheticFieldLoader(fieldName, fieldSimpleName) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(NumericUtils.sortableLongToDouble(value)); @@ -1381,7 +1377,7 @@ public double reduceToStoredPrecision(double value) { abstract SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String fieldSimpleName); private static SourceLoader.SyntheticFieldLoader syntheticLongFieldLoader(String fieldName, String fieldSimpleName) { - return new NumericSyntheticFieldLoader(fieldName, fieldSimpleName) { + return new SortedNumericDocValuesSyntheticFieldLoader(fieldName, fieldSimpleName) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(value); @@ -1754,140 +1750,4 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { return type.syntheticFieldLoader(name(), simpleName()); } - public abstract static class NumericSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { - private final String name; - private final String simpleName; - - protected NumericSyntheticFieldLoader(String name, String simpleName) { - this.name = name; - this.simpleName = simpleName; - } - - @Override - public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { - SortedNumericDocValues dv = docValuesOrNull(reader, name); - if (dv == null) { - return SourceLoader.SyntheticFieldLoader.NOTHING_LEAF; - } - if (docIdsInLeaf.length > 1) { - /* - * The singleton optimization is mostly about looking up all - * values for the field at once. If there's just a single - * document then it's just extra overhead. - */ - NumericDocValues single = DocValues.unwrapSingleton(dv); - if (single != null) { - return singletonLeaf(single, docIdsInLeaf); - } - } - return new ImmediateLeaf(dv); - } - - private class ImmediateLeaf implements Leaf { - private final SortedNumericDocValues dv; - private boolean hasValue; - - ImmediateLeaf(SortedNumericDocValues dv) { - this.dv = dv; - } - - @Override - public boolean empty() { - return false; - } - - @Override - public boolean advanceToDoc(int docId) throws IOException { - return hasValue = dv.advanceExact(docId); - } - - @Override - public void write(XContentBuilder b) throws IOException { - if (false == hasValue) { - return; - } - if (dv.docValueCount() == 1) { - b.field(simpleName); - writeValue(b, dv.nextValue()); - return; - } - b.startArray(simpleName); - for (int i = 0; i < dv.docValueCount(); i++) { - writeValue(b, dv.nextValue()); - } - b.endArray(); - } - } - - /** - * Load all values for all docs up front. This should be much more - * disk and cpu-friendly than {@link ImmediateLeaf} because it resolves - * the values all at once, always scanning forwards on the disk. 
- */ - private Leaf singletonLeaf(NumericDocValues singleton, int[] docIdsInLeaf) throws IOException { - long[] values = new long[docIdsInLeaf.length]; - boolean[] hasValue = new boolean[docIdsInLeaf.length]; - boolean found = false; - for (int d = 0; d < docIdsInLeaf.length; d++) { - if (false == singleton.advanceExact(docIdsInLeaf[d])) { - hasValue[d] = false; - continue; - } - hasValue[d] = true; - values[d] = singleton.longValue(); - found = true; - } - if (found == false) { - return SourceLoader.SyntheticFieldLoader.NOTHING_LEAF; - } - return new Leaf() { - private int idx = -1; - - @Override - public boolean empty() { - return false; - } - - @Override - public boolean advanceToDoc(int docId) throws IOException { - idx++; - if (docIdsInLeaf[idx] != docId) { - throw new IllegalArgumentException( - "expected to be called with [" + docIdsInLeaf[idx] + "] but was called with " + docId + " instead" - ); - } - return hasValue[idx]; - } - - @Override - public void write(XContentBuilder b) throws IOException { - if (hasValue[idx] == false) { - return; - } - b.field(simpleName); - writeValue(b, values[idx]); - } - }; - } - - /** - * Returns a {@link SortedNumericDocValues} or null if it doesn't have any doc values. - * See {@link DocValues#getSortedNumeric} which is *nearly* the same, but it returns - * an "empty" implementation if there aren't any doc values. We need to be able to - * tell if there aren't any and return our empty leaf source loader. - */ - public static SortedNumericDocValues docValuesOrNull(LeafReader reader, String fieldName) throws IOException { - SortedNumericDocValues dv = reader.getSortedNumericDocValues(fieldName); - if (dv != null) { - return dv; - } - NumericDocValues single = reader.getNumericDocValues(fieldName); - if (single != null) { - return DocValues.singleton(single); - } - return null; - } - - protected abstract void writeValue(XContentBuilder b, long value) throws IOException; - } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 636cd3628beeb..f391b0c9a7833 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReader; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; @@ -28,6 +29,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.stream.Stream; public class ObjectMapper extends Mapper implements Cloneable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ObjectMapper.class); @@ -567,52 +569,68 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - List fields = mappers.values() - .stream() - .sorted(Comparator.comparing(Mapper::name)) - .map(Mapper::syntheticFieldLoader) - .filter(l -> l != null) - .toList(); - return (reader, docIdsInLeaf) -> { - List l = new ArrayList<>(); - for (SourceLoader.SyntheticFieldLoader field : fields) { - SourceLoader.SyntheticFieldLoader.Leaf leaf = field.leaf(reader, docIdsInLeaf); - if (false == leaf.empty()) { - l.add(leaf); - } - } - SourceLoader.SyntheticFieldLoader.Leaf[] leaves = l.toArray(SourceLoader.SyntheticFieldLoader.Leaf[]::new); - return new 
SourceLoader.SyntheticFieldLoader.Leaf() { - private boolean hasValue; + return new SyntheticSourceFieldLoader( + mappers.values() + .stream() + .sorted(Comparator.comparing(Mapper::name)) + .map(Mapper::syntheticFieldLoader) + .filter(l -> l != null) + .toList() + ); + } - @Override - public boolean empty() { - return leaves.length == 0; - } + private class SyntheticSourceFieldLoader implements SourceLoader.SyntheticFieldLoader { + private final List fields; + private boolean hasValue; - @Override - public boolean advanceToDoc(int docId) throws IOException { - hasValue = false; - for (SourceLoader.SyntheticFieldLoader.Leaf leaf : leaves) { - boolean leafHasValue = leaf.advanceToDoc(docId); - hasValue |= leafHasValue; - } - return hasValue; - } + private SyntheticSourceFieldLoader(List fields) { + this.fields = fields; + } - @Override - public void write(XContentBuilder b) throws IOException { - if (hasValue == false) { - return; - } - startSyntheticField(b); - for (SourceLoader.SyntheticFieldLoader.Leaf leaf : leaves) { - leaf.write(b); - } - b.endObject(); + @Override + public Stream> storedFieldLoaders() { + return fields.stream().flatMap(SourceLoader.SyntheticFieldLoader::storedFieldLoaders).map(e -> Map.entry(e.getKey(), values -> { + hasValue = true; + e.getValue().load(values); + })); + } + + @Override + public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { + List loaders = new ArrayList<>(); + for (SourceLoader.SyntheticFieldLoader field : fields) { + SourceLoader.SyntheticFieldLoader.DocValuesLoader loader = field.docValuesLoader(leafReader, docIdsInLeaf); + if (loader != null) { + loaders.add(loader); } + } + return docId -> { + for (SourceLoader.SyntheticFieldLoader.DocValuesLoader docValueLoader : loaders) { + boolean leafHasValue = docValueLoader.advanceToDoc(docId); + hasValue |= leafHasValue; + } + /* + * Important and kind of sneaky note: this will return true + * if there were any values loaded from stored fields as + * well. That *is* how we "wake up" objects that contain just + * stored field. + */ + return hasValue; }; - }; + } + + @Override + public void write(XContentBuilder b) throws IOException { + if (hasValue == false) { + return; + } + startSyntheticField(b); + for (SourceLoader.SyntheticFieldLoader field : fields) { + field.write(b); + } + b.endObject(); + hasValue = false; + } } protected void startSyntheticField(XContentBuilder b) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java new file mode 100644 index 0000000000000..0ad69a0498a6c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.stream.Stream; + +/** + * Load {@code _source} fields from {@link SortedNumericDocValues}. + */ +public abstract class SortedNumericDocValuesSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private final String name; + private final String simpleName; + private Values values = NO_VALUES; + + protected SortedNumericDocValuesSyntheticFieldLoader(String name, String simpleName) { + this.name = name; + this.simpleName = simpleName; + } + + protected abstract void writeValue(XContentBuilder b, long value) throws IOException; + + @Override + public Stream> storedFieldLoaders() { + return Stream.of(); + } + + @Override + public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { + SortedNumericDocValues dv = docValuesOrNull(reader, name); + if (dv == null) { + values = NO_VALUES; + return null; + } + if (docIdsInLeaf.length > 1) { + /* + * The singleton optimization is mostly about looking up all + * values for the field at once. If there's just a single + * document then it's just extra overhead. + */ + NumericDocValues single = DocValues.unwrapSingleton(dv); + if (single != null) { + SingletonDocValuesLoader loader = buildSingletonDocValuesLoader(single, docIdsInLeaf); + values = loader == null ? NO_VALUES : loader; + return loader; + } + } + ImmediateDocValuesLoader loader = new ImmediateDocValuesLoader(dv); + values = loader; + return loader; + } + + @Override + public void write(XContentBuilder b) throws IOException { + switch (values.count()) { + case 0: + return; + case 1: + b.field(simpleName); + values.write(b); + return; + default: + b.startArray(simpleName); + values.write(b); + b.endArray(); + return; + } + } + + private interface Values { + int count(); + + void write(XContentBuilder b) throws IOException; + } + + private static final Values NO_VALUES = new Values() { + @Override + public int count() { + return 0; + } + + @Override + public void write(XContentBuilder b) throws IOException {} + }; + + private class ImmediateDocValuesLoader implements DocValuesLoader, Values { + private final SortedNumericDocValues dv; + private boolean hasValue; + + ImmediateDocValuesLoader(SortedNumericDocValues dv) { + this.dv = dv; + } + + @Override + public boolean advanceToDoc(int docId) throws IOException { + return hasValue = dv.advanceExact(docId); + } + + @Override + public int count() { + return hasValue ? 
dv.docValueCount() : 0; + } + + @Override + public void write(XContentBuilder b) throws IOException { + for (int i = 0; i < dv.docValueCount(); i++) { + writeValue(b, dv.nextValue()); + } + } + } + + private SingletonDocValuesLoader buildSingletonDocValuesLoader(NumericDocValues singleton, int[] docIdsInLeaf) throws IOException { + long[] values = new long[docIdsInLeaf.length]; + boolean[] hasValue = new boolean[docIdsInLeaf.length]; + boolean found = false; + for (int d = 0; d < docIdsInLeaf.length; d++) { + if (false == singleton.advanceExact(docIdsInLeaf[d])) { + hasValue[d] = false; + continue; + } + hasValue[d] = true; + values[d] = singleton.longValue(); + found = true; + } + if (found == false) { + return null; + } + return new SingletonDocValuesLoader(docIdsInLeaf, values, hasValue); + } + + /** + * Load all values for all docs up front. This should be much more + * disk and cpu-friendly than {@link ImmediateDocValuesLoader} because + * it resolves the values all at once, always scanning forwards on + * the disk. + */ + private class SingletonDocValuesLoader implements DocValuesLoader, Values { + private final int[] docIdsInLeaf; + private final long[] values; + private final boolean[] hasValue; + private int idx = -1; + + private SingletonDocValuesLoader(int[] docIdsInLeaf, long[] values, boolean[] hasValue) { + this.docIdsInLeaf = docIdsInLeaf; + this.values = values; + this.hasValue = hasValue; + } + + @Override + public boolean advanceToDoc(int docId) throws IOException { + idx++; + if (docIdsInLeaf[idx] != docId) { + throw new IllegalArgumentException( + "expected to be called with [" + docIdsInLeaf[idx] + "] but was called with " + docId + " instead" + ); + } + return hasValue[idx]; + } + + @Override + public int count() { + return hasValue[idx] ? 1 : 0; + } + + @Override + public void write(XContentBuilder b) throws IOException { + assert hasValue[idx]; + writeValue(b, values[idx]); + } + } + + /** + * Returns a {@link SortedNumericDocValues} or null if it doesn't have any doc values. + * See {@link DocValues#getSortedNumeric} which is *nearly* the same, but it returns + * an "empty" implementation if there aren't any doc values. We need to be able to + * tell if there aren't any and return our empty leaf source loader. + */ + public static SortedNumericDocValues docValuesOrNull(LeafReader reader, String fieldName) throws IOException { + SortedNumericDocValues dv = reader.getSortedNumericDocValues(fieldName); + if (dv != null) { + return dv; + } + NumericDocValues single = reader.getNumericDocValues(fieldName); + if (single != null) { + return DocValues.singleton(single); + } + return null; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java new file mode 100644 index 0000000000000..321d1620c41b6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java @@ -0,0 +1,237 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.stream.Stream; + +/** + * Load {@code _source} fields from {@link SortedSetDocValues}. + */ +public abstract class SortedSetDocValuesSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private static final Logger logger = LogManager.getLogger(SortedSetDocValuesSyntheticFieldLoader.class); + + private final String name; + private final String simpleName; + private Values values = NO_VALUES; + + public SortedSetDocValuesSyntheticFieldLoader(String name, String simpleName) { + this.name = name; + this.simpleName = simpleName; + } + + @Override + public Stream> storedFieldLoaders() { + return Stream.of(); + } + + @Override + public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { + SortedSetDocValues dv = DocValues.getSortedSet(reader, name); + if (dv.getValueCount() == 0) { + values = NO_VALUES; + return null; + } + if (docIdsInLeaf.length > 1) { + /* + * The singleton optimization is mostly about looking up ordinals + * in sorted order and doesn't buy anything if there is only a single + * document. + */ + SortedDocValues singleton = DocValues.unwrapSingleton(dv); + if (singleton != null) { + SingletonDocValuesLoader loader = buildSingletonDocValuesLoader(singleton, docIdsInLeaf); + values = loader == null ? NO_VALUES : loader; + return loader; + } + } + ImmediateDocValuesLoader loader = new ImmediateDocValuesLoader(dv); + values = loader; + return loader; + } + + @Override + public void write(XContentBuilder b) throws IOException { + switch (values.count()) { + case 0: + return; + case 1: + b.field(simpleName); + values.write(b); + return; + default: + b.startArray(simpleName); + values.write(b); + b.endArray(); + return; + } + } + + private interface Values { + int count(); + + void write(XContentBuilder b) throws IOException; + } + + private static final Values NO_VALUES = new Values() { + @Override + public int count() { + return 0; + } + + @Override + public void write(XContentBuilder b) throws IOException {} + }; + + /** + * Load ordinals in line with populating the doc and immediately + * convert from ordinals into {@link BytesRef}s. + */ + private class ImmediateDocValuesLoader implements DocValuesLoader, Values { + private final SortedSetDocValues dv; + private boolean hasValue; + + ImmediateDocValuesLoader(SortedSetDocValues dv) { + this.dv = dv; + } + + @Override + public boolean advanceToDoc(int docId) throws IOException { + return hasValue = dv.advanceExact(docId); + } + + @Override + public int count() { + return hasValue ? dv.docValueCount() : 0; + } + + @Override + public void write(XContentBuilder b) throws IOException { + assert hasValue; + for (int i = 0; i < dv.docValueCount(); i++) { + BytesRef c = convert(dv.lookupOrd(dv.nextOrd())); + b.utf8Value(c.bytes, c.offset, c.length); + } + } + } + + /** + * Load all ordinals for all docs up front and resolve to their string + * values in order. 
This should be much more disk-friendly than + * {@link ImmediateDocValuesLoader} because it resolves the ordinals in order and + * marginally more cpu friendly because it resolves the ordinals one time. + */ + private SingletonDocValuesLoader buildSingletonDocValuesLoader(SortedDocValues singleton, int[] docIdsInLeaf) throws IOException { + int[] ords = new int[docIdsInLeaf.length]; + int found = 0; + for (int d = 0; d < docIdsInLeaf.length; d++) { + if (false == singleton.advanceExact(docIdsInLeaf[d])) { + ords[d] = -1; + continue; + } + ords[d] = singleton.ordValue(); + found++; + } + if (found == 0) { + return null; + } + int[] sortedOrds = ords.clone(); + Arrays.sort(sortedOrds); + int unique = 0; + int prev = -1; + for (int ord : sortedOrds) { + if (ord != prev) { + prev = ord; + unique++; + } + } + int[] uniqueOrds = new int[unique]; + BytesRef[] converted = new BytesRef[unique]; + unique = 0; + prev = -1; + for (int ord : sortedOrds) { + if (ord != prev) { + prev = ord; + uniqueOrds[unique] = ord; + converted[unique] = preserve(convert(singleton.lookupOrd(ord))); + unique++; + } + } + logger.debug("loading [{}] on [{}] docs covering [{}] ords", name, docIdsInLeaf.length, uniqueOrds.length); + return new SingletonDocValuesLoader(docIdsInLeaf, ords, uniqueOrds, converted); + } + + private class SingletonDocValuesLoader implements DocValuesLoader, Values { + private final int[] docIdsInLeaf; + private final int[] ords; + private final int[] uniqueOrds; + private final BytesRef[] converted; + + private int idx = -1; + + private SingletonDocValuesLoader(int[] docIdsInLeaf, int[] ords, int[] uniqueOrds, BytesRef[] converted) { + this.docIdsInLeaf = docIdsInLeaf; + this.ords = ords; + this.uniqueOrds = uniqueOrds; + this.converted = converted; + } + + @Override + public boolean advanceToDoc(int docId) throws IOException { + idx++; + if (docIdsInLeaf[idx] != docId) { + throw new IllegalArgumentException( + "expected to be called with [" + docIdsInLeaf[idx] + "] but was called with " + docId + " instead" + ); + } + return ords[idx] >= 0; + } + + @Override + public int count() { + return ords[idx] < 0 ? 0 : 1; + } + + @Override + public void write(XContentBuilder b) throws IOException { + assert ords[idx] >= 0; + int convertedIdx = Arrays.binarySearch(uniqueOrds, ords[idx]); + if (convertedIdx < 0) { + throw new IllegalStateException("received unexpected ord [" + ords[idx] + "]. Expected " + Arrays.toString(uniqueOrds)); + } + BytesRef c = converted[convertedIdx]; + b.utf8Value(c.bytes, c.offset, c.length); + } + } + + /** + * Convert a {@link BytesRef} read from the source into bytes to write + * to the xcontent. This shouldn't make a deep copy if the conversion + * process itself doesn't require one. + */ + protected abstract BytesRef convert(BytesRef value); + + /** + * Preserves {@link BytesRef bytes} returned by {@link #convert} + * to by written later. This should make a + * {@link BytesRef#deepCopyOf deep copy} if {@link #convert} didn't. 
+ */ + protected abstract BytesRef preserve(BytesRef value); +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java index 351608c9a20cf..70111f1e48ffe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java @@ -16,6 +16,11 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Loads source {@code _source} during a GET or {@code _search}. @@ -31,6 +36,12 @@ public interface SourceLoader { */ Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException; + /** + * Stream containing all non-{@code _source} stored fields required + * to build the {@code _source}. + */ + Set requiredStoredFields(); + /** * Loads {@code _source} from some segment. */ @@ -64,6 +75,11 @@ public boolean reordersFieldValues() { public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) { return (fieldsVisitor, docId) -> fieldsVisitor.source(); } + + @Override + public Set requiredStoredFields() { + return Set.of(); + } }; /** @@ -71,9 +87,11 @@ public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) { */ class Synthetic implements SourceLoader { private final SyntheticFieldLoader loader; + private final Map storedFieldLoaders; public Synthetic(Mapping mapping) { loader = mapping.getRoot().syntheticFieldLoader(); + storedFieldLoaders = Map.copyOf(loader.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); } @Override @@ -81,17 +99,27 @@ public boolean reordersFieldValues() { return true; } + @Override + public Set requiredStoredFields() { + return storedFieldLoaders.keySet(); + } + @Override public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { - SyntheticFieldLoader.Leaf leaf = loader.leaf(reader, docIdsInLeaf); - if (leaf.empty()) { - return Leaf.EMPTY_OBJECT; - } + SyntheticFieldLoader.DocValuesLoader leaf = loader.docValuesLoader(reader, docIdsInLeaf); return (fieldsVisitor, docId) -> { + if (fieldsVisitor != null) { + for (Map.Entry> e : fieldsVisitor.fields().entrySet()) { + SyntheticFieldLoader.StoredFieldLoader loader = storedFieldLoaders.get(e.getKey()); + if (loader != null) { + loader.load(e.getValue()); + } + } + } // TODO accept a requested xcontent type try (XContentBuilder b = new XContentBuilder(JsonXContent.jsonXContent, new ByteArrayOutputStream())) { if (leaf.advanceToDoc(docId)) { - leaf.write(b); + loader.write(b); } else { b.startObject().endObject(); } @@ -103,54 +131,89 @@ public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { /** * Load a field for {@link Synthetic}. + *

<p>
+ * {@link SyntheticFieldLoader}s load values through objects vended + * by their {@link #storedFieldLoaders} and {@link #docValuesLoader} + * methods. Then you call {@link #write} to write the values to an + * {@link XContentBuilder} which also clears them.
+ * <p>
+ * This two loaders and one writer setup is specifically designed to + * efficiently load the {@code _source} of indices that have thousands + * of fields declared in the mapping but that only have values for + * dozens of them. It handles this in a few ways:
+ * <ul>
+ * <li>{@link #docValuesLoader} must be called once per document + * per field to load the doc values, but detects up front if + * there are no doc values for that field. It's linear with + * the number of fields, whether or not they have values, + * but skips entirely missing fields.</li>
+ * <li>{@link #storedFieldLoaders} are only called when the + * document contains a stored field with the appropriate name. + * So it's fine to have thousands of these declared in the + * mapping and you don't really pay much to load them. Just + * the cost to build {@link Map} used to address them.</li>
+ * <li>Object fields that don't have any values loaded by either + * means bail out of the loading process and don't pass + * control down to any of their children. Thus it's fine + * to declare huge object structures in the mapping and + * you only spend time iterating the ones you need. Or that + * have doc values.</li>
+ * </ul>
    */ interface SyntheticFieldLoader { /** * Load no values. */ - SyntheticFieldLoader.Leaf NOTHING_LEAF = new Leaf() { + SyntheticFieldLoader NOTHING = new SyntheticFieldLoader() { @Override - public boolean empty() { - return true; + public Stream> storedFieldLoaders() { + return Stream.of(); } @Override - public boolean advanceToDoc(int docId) throws IOException { - return false; + public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { + return null; } @Override - public void write(XContentBuilder b) throws IOException {} + public void write(XContentBuilder b) {} }; /** - * Load no values. + * A {@link Stream} mapping stored field paths to a place to put them + * so they can be included in the next document. */ - SyntheticFieldLoader NOTHING = (r, docIds) -> NOTHING_LEAF; + Stream> storedFieldLoaders(); /** - * Build a loader for this field in the provided segment. + * Build something to load doc values for this field or return + * {@code null} if there are no doc values for this field to + * load. */ - Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException; + DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException; /** - * Loads values for a field in a particular leaf. + * Write values for this document. */ - interface Leaf { - /** - * Is this entirely empty? - */ - boolean empty(); + void write(XContentBuilder b) throws IOException; - /** - * Position the loader at a document. - */ - boolean advanceToDoc(int docId) throws IOException; + /** + * Sync for stored field values. + */ + interface StoredFieldLoader { + void load(List values); + } + /** + * Loads doc values for a field. + */ + interface DocValuesLoader { /** - * Write values for this document. + * Load the doc values for this field. + * + * @return whether or not there are any values for this field */ - void write(XContentBuilder b) throws IOException; + boolean advanceToDoc(int docId) throws IOException; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java new file mode 100644 index 0000000000000..0cd65d0ee3059 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.LeafReader; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +public class StringStoredFieldFieldLoader + implements + SourceLoader.SyntheticFieldLoader, + SourceLoader.SyntheticFieldLoader.StoredFieldLoader { + private final String name; + private final String simpleName; + private List values; + + public StringStoredFieldFieldLoader(String name, String simpleName) { + this.name = name; + this.simpleName = simpleName; + } + + @Override + public Stream> storedFieldLoaders() { + return Stream.of(Map.entry(name, this)); + } + + @Override + public void load(List values) { + this.values = values; + } + + @Override + public void write(XContentBuilder b) throws IOException { + if (values == null || values.isEmpty()) { + return; + } + if (values.size() == 1) { + b.field(simpleName, values.get(0).toString()); + values = null; + return; + } + b.startArray(simpleName); + for (Object value : values) { + b.value(value.toString()); + } + b.endArray(); + values = null; + } + + @Override + public final DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { + return null; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 852d03bcfafff..721f0bbc7ab61 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -1272,12 +1272,15 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } + if (store) { + return new StringStoredFieldFieldLoader(name(), simpleName()); + } for (Mapper sub : this) { if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { KeywordFieldMapper kwd = (KeywordFieldMapper) sub; - if (kwd.fieldType().hasDocValues() - && kwd.hasNormalizer() == false - && kwd.fieldType().ignoreAbove() == KeywordFieldMapper.Defaults.IGNORE_ABOVE) { + if (kwd.hasNormalizer() == false + && kwd.fieldType().ignoreAbove() == KeywordFieldMapper.Defaults.IGNORE_ABOVE + && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { return kwd.syntheticFieldLoader(simpleName()); } @@ -1286,8 +1289,8 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { throw new IllegalArgumentException( String.format( Locale.ROOT, - "field [%s] of type [%s] doesn't support synthetic source unless it has a sub-field of" - + " type [keyword] with doc values enabled and without ignore_above or a normalizer", + "field [%s] of type [%s] doesn't support synthetic source unless it is stored or has a sub-field of" + + " type [keyword] with doc values or stored and without ignore_above or a normalizer", name(), typeName() ) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 79492167596d3..6b2fddd7fa0ee 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -109,8 +109,9 @@ private SearchHits buildSearchHits(SearchContext context, Profiler profiler) { // make sure that we iterate in doc id order Arrays.sort(docs); + 
SourceLoader sourceLoader = context.newSourceLoader(); Map> storedToRequestedFields = new HashMap<>(); - FieldsVisitor fieldsVisitor = createStoredFieldsVisitor(context, storedToRequestedFields); + FieldsVisitor fieldsVisitor = createStoredFieldsVisitor(context, storedToRequestedFields, sourceLoader); profiler.visitor(fieldsVisitor); FetchContext fetchContext = new FetchContext(context); @@ -241,7 +242,11 @@ public int compareTo(DocIdToIndex o) { } } - private static FieldsVisitor createStoredFieldsVisitor(SearchContext context, Map> storedToRequestedFields) { + private static FieldsVisitor createStoredFieldsVisitor( + SearchContext context, + Map> storedToRequestedFields, + SourceLoader sourceLoader + ) { StoredFieldsContext storedFieldsContext = context.storedFieldsContext(); if (storedFieldsContext == null) { @@ -250,6 +255,11 @@ private static FieldsVisitor createStoredFieldsVisitor(SearchContext context, Ma context.fetchSourceContext(FetchSourceContext.FETCH_SOURCE); } boolean loadSource = sourceRequired(context); + if (loadSource) { + if (false == sourceLoader.requiredStoredFields().isEmpty()) { + return new CustomFieldsVisitor(sourceLoader.requiredStoredFields(), true); + } + } return new FieldsVisitor(loadSource); } else if (storedFieldsContext.fetchFields() == false) { // disable stored fields entirely @@ -273,6 +283,9 @@ private static FieldsVisitor createStoredFieldsVisitor(SearchContext context, Ma } } boolean loadSource = sourceRequired(context); + if (loadSource) { + sourceLoader.requiredStoredFields().forEach(fieldName -> storedToRequestedFields.putIfAbsent(fieldName, Set.of())); + } if (storedToRequestedFields.isEmpty()) { // empty list specified, default to disable _source if no explicit indication return new FieldsVisitor(loadSource); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index d7c8fb8314b04..1df397088d027 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -623,11 +623,17 @@ public void testKeywordFieldUtf8LongerThan32766() throws Exception { @Override protected SyntheticSourceSupport syntheticSourceSupport() { - return new KeywordSyntheticSourceSupport(); + return new KeywordSyntheticSourceSupport(randomBoolean(), usually() ? null : randomAlphaOfLength(2)); } static class KeywordSyntheticSourceSupport implements SyntheticSourceSupport { - private final String nullValue = usually() ? null : randomAlphaOfLength(2); + private final boolean store; + private final String nullValue; + + KeywordSyntheticSourceSupport(boolean store, String nullValue) { + this.store = store; + this.nullValue = nullValue; + } @Override public SyntheticSourceExample example(int maxValues) { @@ -637,7 +643,9 @@ public SyntheticSourceExample example(int maxValues) { } List> values = randomList(1, maxValues, this::generateValue); List in = values.stream().map(Tuple::v1).toList(); - List outList = values.stream().map(Tuple::v2).collect(Collectors.toSet()).stream().sorted().toList(); + List outList = store + ? values.stream().map(Tuple::v2).toList() + : values.stream().map(Tuple::v2).collect(Collectors.toSet()).stream().sorted().toList(); Object out = outList.size() == 1 ? 
outList.get(0) : outList; return new SyntheticSourceExample(in, out, this::mapping); } @@ -655,13 +663,22 @@ private void mapping(XContentBuilder b) throws IOException { if (nullValue != null) { b.field("null_value", nullValue); } + if (store) { + b.field("store", true); + if (randomBoolean()) { + b.field("doc_values", false); + } + } } @Override public List invalidExample() throws IOException { return List.of( new SyntheticSourceInvalidExample( - equalTo("field [field] of type [keyword] doesn't support synthetic source because it doesn't have doc values"), + equalTo( + "field [field] of type [keyword] doesn't support synthetic source because " + + "it doesn't have doc values and isn't stored" + ), b -> b.field("type", "keyword").field("doc_values", false) ), new SyntheticSourceInvalidExample( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java index b33df2b94c370..70ab6a1e6c998 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceLoaderTests.java @@ -41,8 +41,8 @@ public void testUnsupported() throws IOException { assertThat( e.getMessage(), equalTo( - "field [txt] of type [text] doesn't support synthetic source unless" - + " it has a sub-field of type [keyword] with doc values enabled and without ignore_above or a normalizer" + "field [txt] of type [text] doesn't support synthetic source unless it is stored or has a sub-field " + + "of type [keyword] with doc values or stored and without ignore_above or a normalizer" ) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 3950e64a1fc60..5d0cf572ffdd9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -1095,11 +1095,21 @@ protected void randomFetchTestFieldConfig(XContentBuilder b) throws IOException @Override protected SyntheticSourceSupport syntheticSourceSupport() { - SyntheticSourceSupport supportDelegate = new KeywordFieldMapperTests.KeywordSyntheticSourceSupport(); + boolean storeTextField = randomBoolean(); + boolean storedKeywordField = storeTextField || randomBoolean(); + String nullValue = storeTextField || usually() ? 
null : randomAlphaOfLength(2); return new SyntheticSourceSupport() { @Override - public SyntheticSourceExample example(int maxValues) throws IOException { - SyntheticSourceExample delegate = supportDelegate.example(maxValues); + public SyntheticSourceExample example(int maxValues) { + SyntheticSourceExample delegate = new KeywordFieldMapperTests.KeywordSyntheticSourceSupport(storedKeywordField, nullValue) + .example(maxValues); + if (storeTextField) { + return new SyntheticSourceExample( + delegate.inputValue(), + delegate.result(), + b -> b.field("type", "text").field("store", true) + ); + } return new SyntheticSourceExample(delegate.inputValue(), delegate.result(), b -> { b.field("type", "text"); b.startObject("fields"); @@ -1115,8 +1125,8 @@ public SyntheticSourceExample example(int maxValues) throws IOException { @Override public List invalidExample() throws IOException { Matcher err = equalTo( - "field [field] of type [text] doesn't support synthetic source " - + "unless it has a sub-field of type [keyword] with doc values enabled and without ignore_above or a normalizer" + "field [field] of type [text] doesn't support synthetic source unless it is stored or" + + " has a sub-field of type [keyword] with doc values or stored and without ignore_above or a normalizer" ); return List.of( new SyntheticSourceInvalidExample(err, TextFieldMapperTests.this::minimalMapping), diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 45d55fa86f57d..84df3ae8d076c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -13,6 +13,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -41,6 +42,8 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; +import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.support.NestedScope; @@ -658,8 +661,7 @@ protected final String syntheticSource(DocumentMapper mapper, CheckedConsumer metrics; + private final Map metricDocValues = new EnumMap<>(Metric.class); + private final Set metricHasValue = EnumSet.noneOf(Metric.class); protected AggregateMetricSyntheticFieldLoader(String name, String simpleName, EnumSet metrics) { this.name = name; @@ -699,37 +703,49 @@ protected AggregateMetricSyntheticFieldLoader(String name, String simpleName, En } @Override - public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { - Map metricDocValues = new EnumMap<>(Metric.class); + public Stream> storedFieldLoaders() { + return Stream.of(); + } + + @Override + public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { + metricDocValues.clear(); for (Metric m : metrics) { String fieldName = subfieldName(name, m); - SortedNumericDocValues dv = 
NumberFieldMapper.NumericSyntheticFieldLoader.docValuesOrNull(reader, fieldName); + SortedNumericDocValues dv = SortedNumericDocValuesSyntheticFieldLoader.docValuesOrNull(reader, fieldName); if (dv != null) { metricDocValues.put(m, dv); } } if (metricDocValues.isEmpty()) { - return SourceLoader.SyntheticFieldLoader.NOTHING_LEAF; + return null; } - return new AggregateMetricSyntheticFieldLoader.ImmediateLeaf(metricDocValues); + return new AggregateDocValuesLoader(); } - private class ImmediateLeaf implements Leaf { - private final Map metricDocValues; - private final Set metricHasValue = EnumSet.noneOf(Metric.class); - - ImmediateLeaf(Map metricDocValues) { - assert metricDocValues.isEmpty() == false : "doc_values for metrics cannot be empty"; - this.metricDocValues = metricDocValues; + @Override + public void write(XContentBuilder b) throws IOException { + if (metricHasValue.isEmpty()) { + return; } - - @Override - public boolean empty() { - return false; + b.startObject(simpleName); + for (Map.Entry entry : metricDocValues.entrySet()) { + if (metricHasValue.contains(entry.getKey())) { + String metricName = entry.getKey().name(); + long value = entry.getValue().nextValue(); + if (entry.getKey() == Metric.value_count) { + b.field(metricName, value); + } else { + b.field(metricName, NumericUtils.sortableLongToDouble(value)); + } + } } + b.endObject(); + } + private class AggregateDocValuesLoader implements DocValuesLoader { @Override public boolean advanceToDoc(int docId) throws IOException { // It is required that all defined metrics must exist. In this case @@ -745,26 +761,6 @@ public boolean advanceToDoc(int docId) throws IOException { return metricHasValue.isEmpty() == false; } - - @Override - public void write(XContentBuilder b) throws IOException { - if (metricHasValue.isEmpty()) { - return; - } - b.startObject(simpleName); - for (Map.Entry entry : metricDocValues.entrySet()) { - if (metricHasValue.contains(entry.getKey())) { - String metricName = entry.getKey().name(); - long value = entry.getValue().nextValue(); - if (entry.getKey() == Metric.value_count) { - b.field(metricName, value); - } else { - b.field(metricName, NumericUtils.sortableLongToDouble(value)); - } - } - } - b.endObject(); - } } } } diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index da1cdd9e78a33..a070b828aa32b 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.constantkeyword.mapper; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -51,6 +52,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.stream.Stream; /** * A {@link FieldMapper} that assigns every document the same value. 
@@ -309,15 +311,25 @@ protected String contentType() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return (reader, docIdsInLeaf) -> new SourceLoader.SyntheticFieldLoader.Leaf() { + return new SourceLoader.SyntheticFieldLoader() { @Override - public boolean empty() { - return fieldType().value == null; + public Stream> storedFieldLoaders() { + return Stream.of(); } @Override - public boolean advanceToDoc(int docId) throws IOException { - return fieldType().value != null; + public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) { + /* + * If there is a value we need to enable objects containing these + * fields. We could build something special for fields that are + * always "on", but constant_keyword fields are rare enough that + * having an extra doc values loader that always returns `true` + * isn't a big performance hit and gets the job done. + */ + if (fieldType().value == null) { + return null; + } + return docId -> true; } @Override
From ad612746c6fbb8fb8784b44da7bb0d29ee7d5b5e Mon Sep 17 00:00:00 2001 From: mushaoqiong Date: Wed, 17 Aug 2022 22:29:15 +0800 Subject: [PATCH 240/265] move log-related logic into log block in IndexLifecycleRunner (#89292) This PR moved some logic that is only used for logging in IndexLifecycleRunner#isReadyToTransitionToThisPhase(...) into the trace-log block, which may save CPU and memory when trace logging is not enabled.
--- docs/changelog/89292.yaml | 5 +++++ .../xpack/ilm/IndexLifecycleRunner.java | 18 +++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/89292.yaml diff --git a/docs/changelog/89292.yaml b/docs/changelog/89292.yaml new file mode 100644 index 0000000000000..d25faa90ee90c --- /dev/null +++ b/docs/changelog/89292.yaml @@ -0,0 +1,5 @@ +pr: 89292 +summary: move log-related logic into log block in IndexLifecycleRunner +area: ILM+SLM +type: enhancement +issues: []
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index d450cbd7f37ad..68208dc808465 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -142,16 +142,16 @@ boolean isReadyToTransitionToThisPhase(final String policy, final IndexMetadata } final TimeValue after = stepRegistry.getIndexAgeForPhase(policy, phase); final long now = nowSupplier.getAsLong(); - final long ageMillis = now - lifecycleDate; - final TimeValue age; - if (ageMillis >= 0) { - age = new TimeValue(ageMillis); - } else if (ageMillis == Long.MIN_VALUE) { - age = new TimeValue(Long.MAX_VALUE); - } else { - age = new TimeValue(-ageMillis); - } if (logger.isTraceEnabled()) { + final long ageMillis = now - lifecycleDate; + final TimeValue age; + if (ageMillis >= 0) { + age = new TimeValue(ageMillis); + } else if (ageMillis == Long.MIN_VALUE) { + age = new TimeValue(Long.MAX_VALUE); + } else { + age = new TimeValue(-ageMillis); + } logger.trace( "[{}] checking for index age to be at least [{}] before performing actions in " + "the \"{}\" phase.
Now: {}, lifecycle date: {}, age: [{}{}/{}s]", From a1056f1e00f5453d29c37769ab4f1b3f3be84e19 Mon Sep 17 00:00:00 2001 From: Adam Michalik Date: Wed, 17 Aug 2022 16:52:34 +0200 Subject: [PATCH 241/265] Docs: Correct ctx.op value to valid 'noop' (#89391) The documentation mentions the `noop` value ("Otherwise it does nothing (`noop`)"), however, the value used in the example script is `none`. This change corrects the value in the example script to `noop`. --- docs/reference/docs/update.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 287427a30834f..989335eb702f4 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -219,7 +219,7 @@ the `tags` field contains `green`, otherwise it does nothing (`noop`): POST test/_update/1 { "script": { - "source": "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'none' }", + "source": "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'noop' }", "lang": "painless", "params": { "tag": "green" From fe8e58658bb32e79f9987ae8789aa65364e1dee0 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 17 Aug 2022 08:10:17 -0700 Subject: [PATCH 242/265] Add source fallback support for unsigned long mapped type (#89349) This change adds source fallback support for unsigned long by adding a new SourceValueFetcherSortedUnsignedLongIndexFieldData similar to the other numeric types. --- docs/changelog/89349.yaml | 5 + ...tcherSortedUnsignedLongIndexFieldData.java | 165 ++++++++++++++++++ .../unsignedlong/UnsignedLongFieldMapper.java | 46 ++++- .../rest-api-spec/test/50_script_values.yml | 101 ++++++++++- 4 files changed, 303 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/89349.yaml create mode 100644 x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java diff --git a/docs/changelog/89349.yaml b/docs/changelog/89349.yaml new file mode 100644 index 0000000000000..1e7c0c848f672 --- /dev/null +++ b/docs/changelog/89349.yaml @@ -0,0 +1,5 @@ +pr: 89349 +summary: Add source fallback support for unsigned long mapped type +area: Mapping +type: enhancement +issues: [] diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java new file mode 100644 index 0000000000000..7d0e143ab9436 --- /dev/null +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.unsignedlong; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fielddata.SourceValueFetcherIndexFieldData; +import org.elasticsearch.index.mapper.ValueFetcher; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.script.field.DocValuesScriptFieldFactory; +import org.elasticsearch.script.field.ToScriptFieldFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +import static org.elasticsearch.search.DocValueFormat.MASK_2_63; + +/** + * {@code SourceValueFetcherSortedUnsignedLongIndexFieldData} uses a {@link ValueFetcher} to + * retrieve values from source that are parsed as an unsigned long. These values are used to + * emulate unsigned long values pulled directly from a doc values data structure through a + * {@link SortedNumericDocValues}. + */ +public class SourceValueFetcherSortedUnsignedLongIndexFieldData extends SourceValueFetcherIndexFieldData { + + public static class Builder extends SourceValueFetcherIndexFieldData.Builder { + + public Builder( + String fieldName, + ValuesSourceType valuesSourceType, + ValueFetcher valueFetcher, + SourceLookup sourceLookup, + ToScriptFieldFactory toScriptFieldFactory + ) { + super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + } + + @Override + public SourceValueFetcherSortedUnsignedLongIndexFieldData build(IndexFieldDataCache cache, CircuitBreakerService breakerService) { + return new SourceValueFetcherSortedUnsignedLongIndexFieldData( + fieldName, + valuesSourceType, + valueFetcher, + sourceLookup, + toScriptFieldFactory + ); + } + } + + protected SourceValueFetcherSortedUnsignedLongIndexFieldData( + String fieldName, + ValuesSourceType valuesSourceType, + ValueFetcher valueFetcher, + SourceLookup sourceLookup, + ToScriptFieldFactory toScriptFieldFactory + ) { + super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + } + + @Override + public SourceValueFetcherLeafFieldData loadDirect(LeafReaderContext context) { + return new SourceValueFetcherSortedUnsignedLongLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + } + + private static class SourceValueFetcherSortedUnsignedLongLeafFieldData extends SourceValueFetcherLeafFieldData { + + private SourceValueFetcherSortedUnsignedLongLeafFieldData( + ToScriptFieldFactory toScriptFieldFactory, + LeafReaderContext leafReaderContext, + ValueFetcher valueFetcher, + SourceLookup sourceLookup + ) { + super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + } + + @Override + public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { + return toScriptFieldFactory.getScriptFieldFactory( + new SourceValueFetcherSortedUnsignedLongDocValues(leafReaderContext, valueFetcher, sourceLookup), + name + ); + } + } + + private static class SourceValueFetcherSortedUnsignedLongDocValues extends SortedNumericDocValues implements ValueFetcherDocValues { + + private final LeafReaderContext leafReaderContext; + + private final ValueFetcher valueFetcher; + private final SourceLookup sourceLookup; + + private List values; + private Iterator iterator; + + 
private SourceValueFetcherSortedUnsignedLongDocValues( + LeafReaderContext leafReaderContext, + ValueFetcher valueFetcher, + SourceLookup sourceLookup + ) { + this.leafReaderContext = leafReaderContext; + this.valueFetcher = valueFetcher; + this.sourceLookup = sourceLookup; + + values = new ArrayList<>(); + } + + @Override + public boolean advanceExact(int doc) throws IOException { + sourceLookup.setSegmentAndDocument(leafReaderContext, doc); + values.clear(); + + for (Object value : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { + assert value instanceof Number; + values.add(((Number) value).longValue() ^ MASK_2_63); + } + + values.sort(Long::compare); + iterator = values.iterator(); + + return true; + } + + @Override + public int docValueCount() { + return values.size(); + } + + @Override + public long nextValue() throws IOException { + assert iterator.hasNext(); + return iterator.next(); + } + + @Override + public int docID() { + throw new UnsupportedOperationException("not supported for source fallback"); + } + + @Override + public int nextDoc() throws IOException { + throw new UnsupportedOperationException("not supported for source fallback"); + } + + @Override + public int advance(int target) throws IOException { + throw new UnsupportedOperationException("not supported for source fallback"); + } + + @Override + public long cost() { + throw new UnsupportedOperationException("not supported for source fallback"); + } + } +} diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index b0d01b1cc596a..96d8f7e6b3ea1 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -36,6 +36,8 @@ import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -50,6 +52,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.xpack.unsignedlong.UnsignedLongLeafFieldData.convertUnsignedLongToDouble; @@ -288,15 +291,37 @@ public Query rangeQuery( @Override public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { - failIfNoDocValues(); - return (cache, breakerService) -> { - final IndexNumericFieldData signedLongValues = new SortedNumericIndexFieldData.Builder( + FielddataOperation operation = fieldDataContext.fielddataOperation(); + + if (operation == FielddataOperation.SEARCH) { + failIfNoDocValues(); + } + + if ((operation == FielddataOperation.SEARCH || operation == FielddataOperation.SCRIPT) && hasDocValues()) { + return (cache, breakerService) -> { + final IndexNumericFieldData signedLongValues = new SortedNumericIndexFieldData.Builder( + name(), + IndexNumericFieldData.NumericType.LONG, + (dv, n) -> { throw new UnsupportedOperationException(); } + ).build(cache, breakerService); + return new UnsignedLongIndexFieldData(signedLongValues, 
UnsignedLongDocValuesField::new); + }; + } + + if (operation == FielddataOperation.SCRIPT) { + SearchLookup searchLookup = fieldDataContext.lookupSupplier().get(); + Set sourcePaths = fieldDataContext.sourcePathsLookup().apply(name()); + + return new SourceValueFetcherSortedUnsignedLongIndexFieldData.Builder( name(), - IndexNumericFieldData.NumericType.LONG, - (dv, n) -> { throw new UnsupportedOperationException(); } - ).build(cache, breakerService); - return new UnsignedLongIndexFieldData(signedLongValues, UnsignedLongDocValuesField::new); - }; + CoreValuesSourceType.NUMERIC, + sourceValueFetcher(sourcePaths), + searchLookup.source(), + UnsignedLongDocValuesField::new + ); + } + + throw new IllegalStateException("unknown field data operation [" + operation.name() + "]"); } @Override @@ -304,8 +329,11 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } + return sourceValueFetcher(context.isSourceEnabled() ? context.sourcePath(name()) : Collections.emptySet()); + } - return new SourceValueFetcher(name(), context, nullValueFormatted) { + private SourceValueFetcher sourceValueFetcher(Set sourcePaths) { + return new SourceValueFetcher(sourcePaths, nullValueFormatted) { @Override protected Object parseSourceValue(Object value) { if (value.equals("")) { diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/50_script_values.yml b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/50_script_values.yml index 749b3c4cf9fdf..8b56e37d13d6f 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/50_script_values.yml +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/50_script_values.yml @@ -12,6 +12,14 @@ setup: properties: ul: type: unsigned_long + ul_ndv: + type: unsigned_long + doc_values: false + ul_multi: + type: unsigned_long + ul_ndv_multi: + type: unsigned_long + doc_values: false - do: bulk: @@ -19,15 +27,15 @@ setup: refresh: true body: | { "index": {"_id" : "1"} } - { "ul": 0 } + { "ul": 0, "ul_ndv": 0, "ul_multi": [18446744073709551615, 9223372036854775808, 0, 0], "ul_ndv_multi": [0, 18446744073709551615, 0, 9223372036854775808]} { "index": {"_id" : "2"} } - { "ul": 9223372036854775807 } + { "ul": 9223372036854775807, "ul_ndv": 9223372036854775807 } { "index": {"_id" : "3"} } - { "ul": 9223372036854775808 } + { "ul": 9223372036854775808, "ul_ndv": 9223372036854775808 } { "index": {"_id" : "4"} } - { "ul": 18446744073709551613 } + { "ul": 18446744073709551613, "ul_ndv": 18446744073709551613 } { "index": {"_id" : "5"} } - { "ul": 18446744073709551615 } + { "ul": 18446744073709551615, "ul_ndv": 18446744073709551615 } --- "Scripted fields values return Long": @@ -63,6 +71,89 @@ setup: - match: { hits.hits.3.fields.scripted_ul.0: 9223372036854775807 } - match: { hits.hits.4.fields.scripted_ul.0: 0 } +--- +"Scripted fields using multi-value unsigned long": + - do: + search: + index: test1 + body: + query: { term: { _id: "1" } } + script_fields: + scripted_ul_0: + script: + source: "field('ul_multi').getValue(1000L)" + scripted_ul_1: + script: + source: "field('ul_multi').getValue(1, 1000L)" + scripted_ul_2: + script: + source: "field('ul_multi').getValue(2, 1000L)" + scripted_ul_3: + script: + source: "field('ul_multi').getValue(3, 1000L)" + + - match: { 
hits.hits.0.fields.scripted_ul_0.0: 0 } + - match: { hits.hits.0.fields.scripted_ul_1.0: 0 } + - match: { hits.hits.0.fields.scripted_ul_2.0: -9223372036854775808 } + - match: { hits.hits.0.fields.scripted_ul_3.0: -1 } + +--- +"No Doc Values: Scripted fields values return Long": + - do: + search: + index: test1 + body: + sort: [ { ul: desc } ] + script_fields: + scripted_ul: + script: + source: "field('ul_ndv').getValue(1000L)" + + - match: { hits.hits.0.fields.scripted_ul.0: -1 } + - match: { hits.hits.1.fields.scripted_ul.0: -3 } + - match: { hits.hits.2.fields.scripted_ul.0: -9223372036854775808 } + - match: { hits.hits.3.fields.scripted_ul.0: 9223372036854775807 } + - match: { hits.hits.4.fields.scripted_ul.0: 0 } + + - do: + catch: bad_request + search: + index: test1 + body: + sort: [ { ul: desc } ] + script_fields: + scripted_ul: + script: + source: "doc['ul_ndv'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + +--- +"No Doc Values: Scripted fields using multi-value unsigned long": + - do: + search: + index: test1 + body: + query: { term: { _id: "1" } } + script_fields: + scripted_ul_0: + script: + source: "field('ul_ndv_multi').getValue(1000L)" + scripted_ul_1: + script: + source: "field('ul_ndv_multi').getValue(1, 1000L)" + scripted_ul_2: + script: + source: "field('ul_ndv_multi').getValue(2, 1000L)" + scripted_ul_3: + script: + source: "field('ul_ndv_multi').getValue(3, 1000L)" + + - match: { hits.hits.0.fields.scripted_ul_0.0: 0 } + - match: { hits.hits.0.fields.scripted_ul_1.0: 0 } + - match: { hits.hits.0.fields.scripted_ul_2.0: -9223372036854775808 } + - match: { hits.hits.0.fields.scripted_ul_3.0: -1 } + + --- "Scripted sort values": - do: From f849847aef927b498c5c75fe683fc49ff74638ec Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 17 Aug 2022 08:10:51 -0700 Subject: [PATCH 243/265] Fix duplication bug for source fallback in numeric types (#89352) Currently, source fallback numeric types do not match doc values numeric types. Source fallback numeric types de-duplicate numeric values in multi-valued fields. This change removes the de- duplication for source fallback values for numeric types using value fetchers. This also adds test cases for all the supported source fallback types to ensure they continue to match their doc values counterparts exactly. 
--- docs/changelog/89352.yaml | 5 + .../painless/55_script_doc_values_dup.yml | 855 ++++++++++++++++++ ...lueFetcherMultiGeoPointIndexFieldData.java | 4 +- ...alueFetcherSortedDoubleIndexFieldData.java | 10 +- ...lueFetcherSortedNumericIndexFieldData.java | 10 +- 5 files changed, 876 insertions(+), 8 deletions(-) create mode 100644 docs/changelog/89352.yaml create mode 100644 modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/55_script_doc_values_dup.yml diff --git a/docs/changelog/89352.yaml b/docs/changelog/89352.yaml new file mode 100644 index 0000000000000..8ad5f01993e40 --- /dev/null +++ b/docs/changelog/89352.yaml @@ -0,0 +1,5 @@ +pr: 89352 +summary: Fix duplication bug for source fallback in numeric types +area: Mapping +type: bug +issues: [] diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/55_script_doc_values_dup.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/55_script_doc_values_dup.yml new file mode 100644 index 0000000000000..d10d38e78e6de --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/55_script_doc_values_dup.yml @@ -0,0 +1,855 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + mappings: + properties: + boolean: + type: boolean + boolean_no_doc_values: + type: boolean + doc_values: false + geo_point: + type: geo_point + geo_point_no_doc_values: + type: geo_point + doc_values: false + keyword: + type: keyword + keyword_no_doc_values: + type: keyword + doc_values: false + long: + type: long + long_no_doc_values: + type: long + doc_values: false + integer: + type: integer + integer_no_doc_values: + type: integer + doc_values: false + short: + type: short + short_no_doc_values: + type: short + doc_values: false + byte: + type: byte + byte_no_doc_values: + type: byte + doc_values: false + double: + type: double + double_no_doc_values: + type: double + doc_values: false + float: + type: float + float_no_doc_values: + type: float + doc_values: false + half_float: + type: half_float + half_float_no_doc_values: + type: half_float + doc_values: false + scaled_float: + type: scaled_float + scaling_factor: 100 + scaled_float_no_doc_values: + type: scaled_float + scaling_factor: 100 + doc_values: false + + - do: + index: + index: test + id: "1" + body: + rank: 1 + boolean: [true, false, true] + boolean_no_doc_values: [true, true, false] + geo_point: [[-71.34,41.12],[60.32,21.25],[-71.34,41.12]] + geo_point_no_doc_values: [[-71.34,41.12],[60.32,21.25],[-71.34,41.12]] + keyword: ["one string", "another string", "one string"] + keyword_no_doc_values: ["one string", "another string", "one string"] + long: [1152921504606846976, -1, -576460752303423488, -1] + long_no_doc_values: [1152921504606846976, -1, -576460752303423488, -1] + integer: [5, -17, 29, -17] + integer_no_doc_values: [5, -17, 29, -17] + short: [45, 6, -18, 30, 45] + short_no_doc_values: [6, -18, 45, 45, 30] + byte: [16, -8, 64, -8, 4, 64] + byte_no_doc_values: [16, -8, -8, 4, 64, 64] + double: [3.141592653588, 2.141592653587, -1.0, -1.0] + double_no_doc_values: [-1.0, 3.141592653588, 2.141592653587, -1.0] + float: [1.123, 2.234, 2.234] + float_no_doc_values: [2.234, 2.234, 1.123] + half_float: [1.123, 1.123, 2.234] + half_float_no_doc_values: [2.234, 1.123, 1.123] + scaled_float: [-3.5, 2.5, -3.5] + scaled_float_no_doc_values: [2.5, -3.5, -3.5] + + - do: + indices.refresh: {} + +--- +"boolean_dup_order": + - do: + search: + 
rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['boolean'].get(0)" + field_1: + script: + source: "doc['boolean'].get(1)" + field_2: + script: + source: "doc['boolean'].get(2)" + - match: { hits.hits.0.fields.field_0.0: false } + - match: { hits.hits.0.fields.field_1.0: true } + - match: { hits.hits.0.fields.field_2.0: true } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('boolean').get(true)" + field_1: + script: + source: "field('boolean').get(1, false)" + field_2: + script: + source: "field('boolean').get(2, false)" + - match: { hits.hits.0.fields.field_0.0: false } + - match: { hits.hits.0.fields.field_1.0: true } + - match: { hits.hits.0.fields.field_2.0: true } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('boolean_no_doc_values').get(true)" + field_1: + script: + source: "field('boolean_no_doc_values').get(1, false)" + field_2: + script: + source: "field('boolean_no_doc_values').get(2, false)" + - match: { hits.hits.0.fields.field_0.0: false } + - match: { hits.hits.0.fields.field_1.0: true } + - match: { hits.hits.0.fields.field_2.0: true } + +--- +"geo_point_dup_order": + - skip: + features: close_to + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['geo_point'].get(0)" + field_1: + script: + source: "doc['geo_point'].get(1)" + field_2: + script: + source: "doc['geo_point'].get(2)" + - close_to: { hits.hits.0.fields.field_0.0.lat: { value: 21.249999990686774, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_0.0.lon: { value: 60.319999968633056, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0.lat: { value: 41.1199999647215, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0.lon: { value: -71.34000004269183, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0.lat: { value: 41.1199999647215, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0.lon: { value: -71.34000004269183, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('geo_point').get(new GeoPoint())" + field_1: + script: + source: "field('geo_point').get(1, new GeoPoint())" + field_2: + script: + source: "field('geo_point').get(2, new GeoPoint())" + - close_to: { hits.hits.0.fields.field_0.0.lat: { value: 21.249999990686774, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_0.0.lon: { value: 60.319999968633056, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0.lat: { value: 41.1199999647215, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0.lon: { value: -71.34000004269183, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0.lat: { value: 41.1199999647215, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0.lon: { value: -71.34000004269183, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('geo_point_no_doc_values').get(new GeoPoint())" + field_1: + script: + source: "field('geo_point_no_doc_values').get(1, new GeoPoint())" + field_2: + script: + source: "field('geo_point_no_doc_values').get(2, new GeoPoint())" + - close_to: { 
hits.hits.0.fields.field_0.0.lat: { value: 21.249999990686774, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_0.0.lon: { value: 60.319999968633056, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0.lat: { value: 41.1199999647215, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0.lon: { value: -71.34000004269183, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0.lat: { value: 41.1199999647215, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0.lon: { value: -71.34000004269183, error: 0.001 } } + +--- +"keyword_dup_order": + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['keyword'].get(0)" + field_1: + script: + source: "doc['keyword'].get(1)" + - match: { hits.hits.0.fields.field_0.0: "another string" } + - match: { hits.hits.0.fields.field_1.0: "one string" } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('keyword').get('')" + field_1: + script: + source: "field('keyword').get(1, '')" + - match: { hits.hits.0.fields.field_0.0: "another string" } + - match: { hits.hits.0.fields.field_1.0: "one string" } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('keyword_no_doc_values').get('')" + field_1: + script: + source: "field('keyword_no_doc_values').get(1, '')" + - match: { hits.hits.0.fields.field_0.0: "another string" } + - match: { hits.hits.0.fields.field_1.0: "one string" } + +--- +"long_dup_order": + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['long'].get(0)" + field_1: + script: + source: "doc['long'].get(1)" + field_2: + script: + source: "doc['long'].get(2)" + field_3: + script: + source: "doc['long'].get(3)" + - match: { hits.hits.0.fields.field_0.0: -576460752303423488 } + - match: { hits.hits.0.fields.field_1.0: -1 } + - match: { hits.hits.0.fields.field_2.0: -1 } + - match: { hits.hits.0.fields.field_3.0: 1152921504606846976 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('long').get(0L)" + field_1: + script: + source: "field('long').get(1, 0L)" + field_2: + script: + source: "field('long').get(2, 0L)" + field_3: + script: + source: "field('long').get(3, 0L)" + - match: { hits.hits.0.fields.field_0.0: -576460752303423488 } + - match: { hits.hits.0.fields.field_1.0: -1 } + - match: { hits.hits.0.fields.field_2.0: -1 } + - match: { hits.hits.0.fields.field_3.0: 1152921504606846976 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('long_no_doc_values').get(0L)" + field_1: + script: + source: "field('long_no_doc_values').get(1, 0L)" + field_2: + script: + source: "field('long_no_doc_values').get(2, 0L)" + field_3: + script: + source: "field('long_no_doc_values').get(3, 0L)" + - match: { hits.hits.0.fields.field_0.0: -576460752303423488 } + - match: { hits.hits.0.fields.field_1.0: -1 } + - match: { hits.hits.0.fields.field_2.0: -1 } + - match: { hits.hits.0.fields.field_3.0: 1152921504606846976 } + +--- +"integer_dup_order": + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + 
source: "doc['integer'].get(0)" + field_1: + script: + source: "doc['integer'].get(1)" + field_2: + script: + source: "doc['integer'].get(2)" + field_3: + script: + source: "doc['integer'].get(3)" + - match: { hits.hits.0.fields.field_0.0: -17 } + - match: { hits.hits.0.fields.field_1.0: -17 } + - match: { hits.hits.0.fields.field_2.0: 5 } + - match: { hits.hits.0.fields.field_3.0: 29 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('integer').get(0)" + field_1: + script: + source: "field('integer').get(1, 0)" + field_2: + script: + source: "field('integer').get(2, 0)" + field_3: + script: + source: "field('integer').get(3, 0)" + - match: { hits.hits.0.fields.field_0.0: -17 } + - match: { hits.hits.0.fields.field_1.0: -17 } + - match: { hits.hits.0.fields.field_2.0: 5 } + - match: { hits.hits.0.fields.field_3.0: 29 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('integer_no_doc_values').get(0)" + field_1: + script: + source: "field('integer_no_doc_values').get(1, 0)" + field_2: + script: + source: "field('integer_no_doc_values').get(2, 0)" + field_3: + script: + source: "field('integer_no_doc_values').get(3, 0)" + - match: { hits.hits.0.fields.field_0.0: -17 } + - match: { hits.hits.0.fields.field_1.0: -17 } + - match: { hits.hits.0.fields.field_2.0: 5 } + - match: { hits.hits.0.fields.field_3.0: 29 } + +--- +"short_dup_order": + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['short'].get(0)" + field_1: + script: + source: "doc['short'].get(1)" + field_2: + script: + source: "doc['short'].get(2)" + field_3: + script: + source: "doc['short'].get(3)" + field_4: + script: + source: "doc['short'].get(4)" + - match: { hits.hits.0.fields.field_0.0: -18 } + - match: { hits.hits.0.fields.field_1.0: 6 } + - match: { hits.hits.0.fields.field_2.0: 30 } + - match: { hits.hits.0.fields.field_3.0: 45 } + - match: { hits.hits.0.fields.field_4.0: 45 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('short').get((short)0)" + field_1: + script: + source: "field('short').get(1, (short)0)" + field_2: + script: + source: "field('short').get(2, (short)0)" + field_3: + script: + source: "field('short').get(3, (short)0)" + field_4: + script: + source: "field('short').get(4, (short)0)" + - match: { hits.hits.0.fields.field_0.0: -18 } + - match: { hits.hits.0.fields.field_1.0: 6 } + - match: { hits.hits.0.fields.field_2.0: 30 } + - match: { hits.hits.0.fields.field_3.0: 45 } + - match: { hits.hits.0.fields.field_4.0: 45 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('short_no_doc_values').get((short)0)" + field_1: + script: + source: "field('short_no_doc_values').get(1, (short)0)" + field_2: + script: + source: "field('short_no_doc_values').get(2, (short)0)" + field_3: + script: + source: "field('short_no_doc_values').get(3, (short)0)" + field_4: + script: + source: "field('short_no_doc_values').get(4, (short)0)" + - match: { hits.hits.0.fields.field_0.0: -18 } + - match: { hits.hits.0.fields.field_1.0: 6 } + - match: { hits.hits.0.fields.field_2.0: 30 } + - match: { hits.hits.0.fields.field_3.0: 45 } + - match: { 
hits.hits.0.fields.field_4.0: 45 } + +--- +"byte_dup_order": + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['byte'].get(0)" + field_1: + script: + source: "doc['byte'].get(1)" + field_2: + script: + source: "doc['byte'].get(2)" + field_3: + script: + source: "doc['byte'].get(3)" + field_4: + script: + source: "doc['byte'].get(4)" + field_5: + script: + source: "doc['byte'].get(5)" + - match: { hits.hits.0.fields.field_0.0: -8 } + - match: { hits.hits.0.fields.field_1.0: -8 } + - match: { hits.hits.0.fields.field_2.0: 4 } + - match: { hits.hits.0.fields.field_3.0: 16 } + - match: { hits.hits.0.fields.field_4.0: 64 } + - match: { hits.hits.0.fields.field_5.0: 64 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('byte').get((byte)0)" + field_1: + script: + source: "field('byte').get(1, (byte)0)" + field_2: + script: + source: "field('byte').get(2, (byte)0)" + field_3: + script: + source: "field('byte').get(3, (byte)0)" + field_4: + script: + source: "field('byte').get(4, (byte)0)" + field_5: + script: + source: "field('byte').get(5, (byte)0)" + - match: { hits.hits.0.fields.field_0.0: -8 } + - match: { hits.hits.0.fields.field_1.0: -8 } + - match: { hits.hits.0.fields.field_2.0: 4 } + - match: { hits.hits.0.fields.field_3.0: 16 } + - match: { hits.hits.0.fields.field_4.0: 64 } + - match: { hits.hits.0.fields.field_5.0: 64 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('byte_no_doc_values').get((byte)0)" + field_1: + script: + source: "field('byte_no_doc_values').get(1, (byte)0)" + field_2: + script: + source: "field('byte_no_doc_values').get(2, (byte)0)" + field_3: + script: + source: "field('byte_no_doc_values').get(3, (byte)0)" + field_4: + script: + source: "field('byte_no_doc_values').get(4, (byte)0)" + field_5: + script: + source: "field('byte_no_doc_values').get(5, (byte)0)" + - match: { hits.hits.0.fields.field_0.0: -8 } + - match: { hits.hits.0.fields.field_1.0: -8 } + - match: { hits.hits.0.fields.field_2.0: 4 } + - match: { hits.hits.0.fields.field_3.0: 16 } + - match: { hits.hits.0.fields.field_4.0: 64 } + - match: { hits.hits.0.fields.field_5.0: 64 } + +--- +"double_dup_order": + - skip: + features: close_to + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['double'].get(0)" + field_1: + script: + source: "doc['double'].get(1)" + field_2: + script: + source: "doc['double'].get(2)" + field_3: + script: + source: "doc['double'].get(3)" + - close_to: { hits.hits.0.fields.field_0.0: { value: -1.0, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: -1.0, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.141592653587, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_3.0: { value: 3.141592653588, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('double').get(0)" + field_1: + script: + source: "field('double').get(1, 0)" + field_2: + script: + source: "field('double').get(2, 0)" + field_3: + script: + source: "field('double').get(3, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: -1.0, error: 0.001 } } + - close_to: { 
hits.hits.0.fields.field_1.0: { value: -1.0, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.141592653587, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_3.0: { value: 3.141592653588, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('double_no_doc_values').get(0)" + field_1: + script: + source: "field('double_no_doc_values').get(1, 0)" + field_2: + script: + source: "field('double_no_doc_values').get(2, 0)" + field_3: + script: + source: "field('double_no_doc_values').get(3, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: -1.0, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: -1.0, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.141592653587, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_3.0: { value: 3.141592653588, error: 0.001 } } + +--- +"float_dup_order": + - skip: + features: close_to + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['float'].get(0)" + field_1: + script: + source: "doc['float'].get(1)" + field_2: + script: + source: "doc['float'].get(2)" + - close_to: { hits.hits.0.fields.field_0.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: 2.234, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.234, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('float').get(0)" + field_1: + script: + source: "field('float').get(1, 0)" + field_2: + script: + source: "field('float').get(2, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: 2.234, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.234, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('float_no_doc_values').get(0)" + field_1: + script: + source: "field('float_no_doc_values').get(1, 0)" + field_2: + script: + source: "field('float_no_doc_values').get(2, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: 2.234, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.234, error: 0.001 } } + +--- +"half_float_dup_order": + - skip: + features: close_to + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['half_float'].get(0)" + field_1: + script: + source: "doc['half_float'].get(1)" + field_2: + script: + source: "doc['half_float'].get(2)" + - close_to: { hits.hits.0.fields.field_0.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.234, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('half_float').get(0)" + field_1: + script: + source: "field('half_float').get(1, 0)" + field_2: + script: + source: "field('half_float').get(2, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: 1.123, error: 0.001 
} } + - close_to: { hits.hits.0.fields.field_1.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.234, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('half_float_no_doc_values').get(0)" + field_1: + script: + source: "field('half_float_no_doc_values').get(1, 0)" + field_2: + script: + source: "field('half_float_no_doc_values').get(2, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: 1.123, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.234, error: 0.001 } } + +--- +"scaled_float_dup_order": + - skip: + features: close_to + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "doc['scaled_float'].get(0)" + field_1: + script: + source: "doc['scaled_float'].get(1)" + field_2: + script: + source: "doc['scaled_float'].get(2)" + - close_to: { hits.hits.0.fields.field_0.0: { value: -3.5, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: -3.5, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.5, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('scaled_float').get(0)" + field_1: + script: + source: "field('scaled_float').get(1, 0)" + field_2: + script: + source: "field('scaled_float').get(2, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: -3.5, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: -3.5, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.5, error: 0.001 } } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field_0: + script: + source: "field('scaled_float_no_doc_values').get(0)" + field_1: + script: + source: "field('scaled_float_no_doc_values').get(1, 0)" + field_2: + script: + source: "field('scaled_float_no_doc_values').get(2, 0)" + - close_to: { hits.hits.0.fields.field_0.0: { value: -3.5, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_1.0: { value: -3.5, error: 0.001 } } + - close_to: { hits.hits.0.fields.field_2.0: { value: 2.5, error: 0.001 } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java index 5ee4f860e19f1..a0ed0c6944087 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java @@ -21,7 +21,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.TreeSet; public class SourceValueFetcherMultiGeoPointIndexFieldData extends SourceValueFetcherIndexFieldData { @@ -100,7 +99,7 @@ public SourceValueFetcherMultiGeoPointDocValues( @SuppressWarnings("unchecked") public boolean advanceExact(int doc) throws IOException { sourceLookup.setSegmentAndDocument(leafReaderContext, doc); - values = new TreeSet<>(); + values.clear(); for (Object value : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { assert value instanceof Map && ((Map) 
value).get("coordinates") instanceof List; @@ -111,6 +110,7 @@ public boolean advanceExact(int doc) throws IOException { ); } + values.sort(Long::compare); iterator = values.iterator(); return true; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java index dea18d6eae6d1..8fd8010bb2532 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java @@ -17,9 +17,10 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; -import java.util.TreeSet; +import java.util.List; public class SourceValueFetcherSortedDoubleIndexFieldData extends SourceValueFetcherIndexFieldData { @@ -89,7 +90,7 @@ private static class SourceValueFetcherSortedNumericDoubleValues extends SortedN private final ValueFetcher valueFetcher; private final SourceLookup sourceLookup; - private TreeSet values; + private final List values; private Iterator iterator; private SourceValueFetcherSortedNumericDoubleValues( @@ -100,18 +101,21 @@ private SourceValueFetcherSortedNumericDoubleValues( this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; this.sourceLookup = sourceLookup; + + values = new ArrayList<>(); } @Override public boolean advanceExact(int doc) throws IOException { sourceLookup.setSegmentAndDocument(leafReaderContext, doc); - values = new TreeSet<>(); + values.clear(); for (Object value : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { assert value instanceof Number; values.add(((Number) value).doubleValue()); } + values.sort(Double::compare); iterator = values.iterator(); return true; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java index 70b7db917e20a..491898baaa08f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java @@ -18,9 +18,10 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; -import java.util.TreeSet; +import java.util.List; public class SourceValueFetcherSortedNumericIndexFieldData extends SourceValueFetcherIndexFieldData { @@ -90,7 +91,7 @@ public static class SourceValueFetcherSortedNumericDocValues extends SortedNumer protected final ValueFetcher valueFetcher; protected final SourceLookup sourceLookup; - protected TreeSet values; + protected final List values; protected Iterator iterator; public SourceValueFetcherSortedNumericDocValues( @@ -101,18 +102,21 @@ public SourceValueFetcherSortedNumericDocValues( this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; this.sourceLookup = sourceLookup; + + values = new ArrayList<>(); } @Override public boolean advanceExact(int doc) throws IOException { sourceLookup.setSegmentAndDocument(leafReaderContext, doc); - values = new TreeSet<>(); + values.clear(); for (Object value : 
valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { assert value instanceof Number; values.add(((Number) value).longValue()); } + values.sort(Long::compare); iterator = values.iterator(); return true; From cbea639155925a93e19f16b71beaccaed9af79f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Wed, 17 Aug 2022 17:42:47 +0200 Subject: [PATCH 244/265] Add the ability to run REST integration tests with 1 allocated processor (#89234) This commit adds a new system property `tests.configure_test_clusters_with_one_processor` that configures REST integration tests to run with 1 allocated processor, this should help finding possible deadlocks (if any) when the Elasticsearch nodes have `node.processors` set to 1. --- .../gradle/internal/InternalTestClustersPlugin.java | 12 ++++++++++++ .../org/elasticsearch/test/InternalTestCluster.java | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java index cc8348d424e58..196835ccdd06d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java @@ -10,7 +10,9 @@ import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; import org.elasticsearch.gradle.testclusters.TestClustersPlugin; +import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.provider.ProviderFactory; @@ -36,6 +38,16 @@ public void apply(Project project) { version -> (version.equals(VersionProperties.getElasticsearchVersion()) && BuildParams.isSnapshotBuild() == false) || BuildParams.getBwcVersions().unreleasedInfo(version) == null ); + + if (shouldConfigureTestClustersWithOneProcessor()) { + NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project + .getExtensions() + .getByName(TestClustersPlugin.EXTENSION_NAME); + testClusters.configureEach(elasticsearchCluster -> elasticsearchCluster.setting("node.processors", "1")); + } } + private boolean shouldConfigureTestClustersWithOneProcessor() { + return Boolean.parseBoolean(System.getProperty("tests.configure_test_clusters_with_one_processor", "false")); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index c46114148695a..923639324e8dd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -524,7 +524,7 @@ private static Settings getRandomNodeSettings(long seed) { builder.put( EsExecutors.NODE_PROCESSORS_SETTING.getKey(), - 1 + random.nextInt(Math.min(4, Runtime.getRuntime().availableProcessors())) + RandomNumbers.randomIntBetween(random, 1, Math.min(4, Runtime.getRuntime().availableProcessors())) ); if (random.nextBoolean()) { if (random.nextBoolean()) { From 1aa43ecf2c73e3d64f239514cf95d20cebd41ca0 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 17 Aug 2022 09:13:13 -0700 Subject: [PATCH 245/265] Add text field support in the Painless scripting fields 
API (#89396) This change adds access to mapped text fields via the Painless scripting fields API. The values returned from a text field via the scripting fields API always use source as described by (#81246). Access via the old-style through doc will still depend on field data, so there is no change and avoids bwc issues. --- docs/changelog/89396.yaml | 5 + .../test/painless/50_script_doc_values.yml | 267 ++++++++++++++++++ ...alueFetcherSortedBinaryIndexFieldData.java | 15 +- .../index/mapper/TextFieldMapper.java | 55 ++-- .../script/field/TextDocValuesField.java | 17 ++ 5 files changed, 335 insertions(+), 24 deletions(-) create mode 100644 docs/changelog/89396.yaml create mode 100644 server/src/main/java/org/elasticsearch/script/field/TextDocValuesField.java diff --git a/docs/changelog/89396.yaml b/docs/changelog/89396.yaml new file mode 100644 index 0000000000000..933f951437d4e --- /dev/null +++ b/docs/changelog/89396.yaml @@ -0,0 +1,5 @@ +pr: 89396 +summary: Add text field support in the Painless scripting fields API +area: Mapping +type: enhancement +issues: [] diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index dd2187673134a..979f0a1cdf7df 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -70,6 +70,11 @@ setup: type: scaled_float scaling_factor: 100 doc_values: false + text: + type: text + fielddata: true + text_no_field_data: + type: text token_count: type: token_count analyzer: standard @@ -110,6 +115,8 @@ setup: half_float_no_doc_values: 3.140625 scaled_float: 3.14 scaled_float_no_doc_values: 3.14 + text: "Lots of text." + text_no_field_data: "Lots of text." token_count: count all these words please - do: @@ -150,6 +157,8 @@ setup: half_float_no_doc_values: [2.234, 1.123] scaled_float: [-3.5, 2.5] scaled_float_no_doc_values: [2.5, -3.5] + text: ["Lots of text.", "even more text", "SOOOOO much text"] + text_no_field_data: ["Lots of text.", "even more text", "SOOOOO much text"] - do: @@ -2719,6 +2728,264 @@ setup: source: "int value = field('dne').get(1, 1); value" - match: { hits.hits.0.fields.field.0: 1 } +--- +"text": + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['text'].get(0)" + - match: { hits.hits.0.fields.field.0: lots } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['text'].value" + - match: { hits.hits.0.fields.field.0: lots } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('text').get('')" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "Lots of text." } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('text', '')" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "Lots of text." 
} + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String defaultText = 'default text'; field('text').get(defaultText)" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "default text" } + - match: { hits.hits.2.fields.field.0: "Lots of text." } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String defaultText = 'default text'; $('text', defaultText)" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "default text" } + - match: { hits.hits.2.fields.field.0: "Lots of text." } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('text').get(1, '')" + - match: { hits.hits.0.fields.field.0: "" } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "SOOOOO much text" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String defaultText = 'default text'; field('text').get(1, defaultText)" + - match: { hits.hits.0.fields.field.0: "default text" } + - match: { hits.hits.1.fields.field.0: "default text" } + - match: { hits.hits.2.fields.field.0: "SOOOOO much text" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('text').get(1, '')" + - match: { hits.hits.0.fields.field.0: "" } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "SOOOOO much text" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String cat = ''; for (String s : field('text')) { cat += s; } cat + field('text').size();" + - match: { hits.hits.0.fields.field.0: "Lots of text.1" } + - match: { hits.hits.1.fields.field.0: "0" } + - match: { hits.hits.2.fields.field.0: "Lots of text.SOOOOO much texteven more text3" } + +--- +"text_no_field_data": + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['text_no_field_data'].get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc['text_no_field_data'].value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('text_no_field_data').get('')" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "Lots of text." } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('text_no_field_data', '')" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "Lots of text." 
} + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String defaultText = 'default text'; field('text_no_field_data').get(defaultText)" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "default text" } + - match: { hits.hits.2.fields.field.0: "Lots of text." } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String defaultText = 'default text'; $('text_no_field_data', defaultText)" + - match: { hits.hits.0.fields.field.0: "Lots of text." } + - match: { hits.hits.1.fields.field.0: "default text" } + - match: { hits.hits.2.fields.field.0: "Lots of text." } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('text_no_field_data').get(1, '')" + - match: { hits.hits.0.fields.field.0: "" } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "SOOOOO much text" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String defaultText = 'default text'; field('text_no_field_data').get(1, defaultText)" + - match: { hits.hits.0.fields.field.0: "default text" } + - match: { hits.hits.1.fields.field.0: "default text" } + - match: { hits.hits.2.fields.field.0: "SOOOOO much text" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "field('text_no_field_data').get(1, '')" + - match: { hits.hits.0.fields.field.0: "" } + - match: { hits.hits.1.fields.field.0: "" } + - match: { hits.hits.2.fields.field.0: "SOOOOO much text" } + + - do: + search: + rest_total_hits_as_int: true + body: + sort: [ { rank: asc } ] + script_fields: + field: + script: + source: "String cat = ''; for (String s : field('text_no_field_data')) { cat += s; } cat + field('text_no_field_data').size();" + - match: { hits.hits.0.fields.field.0: "Lots of text.1" } + - match: { hits.hits.1.fields.field.0: "0" } + - match: { hits.hits.2.fields.field.0: "Lots of text.SOOOOO much texteven more text3" } + --- "version and sequence number": - do: diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java index 501430149a0ce..535cc3320b2ae 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java @@ -91,8 +91,8 @@ public static class SourceValueFetcherSortedBinaryDocValues extends SortedBinary private final ValueFetcher valueFetcher; private final SourceLookup sourceLookup; - private SortedSet values; - private Iterator iterator; + private final SortedSet values; + private Iterator iterator; public SourceValueFetcherSortedBinaryDocValues( LeafReaderContext leafReaderContext, @@ -102,12 +102,19 @@ public SourceValueFetcherSortedBinaryDocValues( this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; this.sourceLookup = sourceLookup; + + values = new TreeSet<>(); } @Override public boolean advanceExact(int doc) throws IOException { sourceLookup.setSegmentAndDocument(leafReaderContext, doc); - values = new 
TreeSet<>(valueFetcher.fetchValues(sourceLookup, Collections.emptyList())); + values.clear(); + + for (Object object : valueFetcher.fetchValues(sourceLookup, Collections.emptyList())) { + values.add(new BytesRef(object.toString())); + } + iterator = values.iterator(); return true; @@ -121,7 +128,7 @@ public int docValueCount() { @Override public BytesRef nextValue() throws IOException { assert iterator.hasNext(); - return new BytesRef(iterator.next().toString()); + return iterator.next(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 721f0bbc7ab61..045be484f09c3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -60,10 +60,12 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.SourceValueFetcherSortedBinaryIndexFieldData; import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.script.field.DelegateDocValuesField; +import org.elasticsearch.script.field.TextDocValuesField; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -894,29 +896,42 @@ public static boolean hasGaps(TokenStream stream) throws IOException { @Override public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { - if (fielddata == false) { - throw new IllegalArgumentException( - "Text fields are not optimised for operations that require per-document " - + "field data like aggregations and sorting, so these operations are disabled by default. Please use a " - + "keyword field instead. Alternatively, set fielddata=true on [" - + name() - + "] in order to load " - + "field data by uninverting the inverted index. Note that this can use significant memory." + FielddataOperation operation = fieldDataContext.fielddataOperation(); + + if (operation == FielddataOperation.SCRIPT) { + return new SourceValueFetcherSortedBinaryIndexFieldData.Builder( + name(), + CoreValuesSourceType.KEYWORD, + SourceValueFetcher.toString(fieldDataContext.sourcePathsLookup().apply(name())), + fieldDataContext.lookupSupplier().get().source(), + TextDocValuesField::new + ); + } else if (operation == FielddataOperation.SEARCH) { + if (fielddata == false) { + throw new IllegalArgumentException( + "Text fields are not optimised for operations that require per-document " + + "field data like aggregations and sorting, so these operations are disabled by default. Please use a " + + "keyword field instead. Alternatively, set fielddata=true on [" + + name() + + "] in order to load " + + "field data by uninverting the inverted index. Note that this can use significant memory." 
+ ); + } + return new PagedBytesIndexFieldData.Builder( + name(), + filter.minFreq, + filter.maxFreq, + filter.minSegmentSize, + CoreValuesSourceType.KEYWORD, + (dv, n) -> new DelegateDocValuesField( + new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(FieldData.toString(dv))), + n + ) ); } - return new PagedBytesIndexFieldData.Builder( - name(), - filter.minFreq, - filter.maxFreq, - filter.minSegmentSize, - CoreValuesSourceType.KEYWORD, - (dv, n) -> new DelegateDocValuesField( - new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(FieldData.toString(dv))), - n - ) - ); - } + throw new IllegalStateException("unknown field data operation [" + operation.name() + "]"); + } } public static class ConstantScoreTextFieldType extends TextFieldType { diff --git a/server/src/main/java/org/elasticsearch/script/field/TextDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/TextDocValuesField.java new file mode 100644 index 0000000000000..7d2bc45f7d059 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/TextDocValuesField.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script.field; + +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; + +public class TextDocValuesField extends BaseKeywordDocValuesField { + public TextDocValuesField(SortedBinaryDocValues input, String name) { + super(input, name); + } +} From f31b1f6d578da3a86887ffe7ff493c1a2195d8dc Mon Sep 17 00:00:00 2001 From: dh-cloud <60729713+dh-cloud@users.noreply.github.com> Date: Thu, 18 Aug 2022 00:53:48 +0800 Subject: [PATCH 246/265] fix a typo in Security.java (#89248) Fix name of path.conf option. 
--- docs/changelog/89248.yaml | 6 ++++++ .../src/main/java/org/elasticsearch/bootstrap/Security.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/89248.yaml diff --git a/docs/changelog/89248.yaml b/docs/changelog/89248.yaml new file mode 100644 index 0000000000000..903078381f1ec --- /dev/null +++ b/docs/changelog/89248.yaml @@ -0,0 +1,6 @@ +pr: 89248 +summary: fix "path.conf'" typo in Security.java +area: Engine +type: bug +issues: + - 89327 diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Security.java b/server/src/main/java/org/elasticsearch/bootstrap/Security.java index 02e0e25aca2a8..f75d2314bf936 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -216,7 +216,7 @@ static void addFilePermissions(Permissions policy, Environment environment, Path addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink", false); - addDirectoryPath(policy, "path.conf'", environment.configFile(), "read,readlink", false); + addDirectoryPath(policy, "path.conf", environment.configFile(), "read,readlink", false); // read-write dirs addDirectoryPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete", false); addDirectoryPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete", false); From 825c3547911dfdf8a844d652773c87073343acdf Mon Sep 17 00:00:00 2001 From: Nikola Grcevski <6207777+grcevski@users.noreply.github.com> Date: Wed, 17 Aug 2022 13:02:29 -0400 Subject: [PATCH 247/265] Clean-up file watcher keys. (#89429) Clean-up all open watcher keys in FileSettingsService. 
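For context, the change that follows is about plain java.nio.file watch keys: every directory registration hands back a WatchKey, and each outstanding key has to be cancelled before the WatchService is closed or re-created, which is exactly the bookkeeping the new cleanupWatchKeys() helper centralizes. The minimal sketch here shows that lifecycle in isolation; the class and field names are illustrative only and are not the actual FileSettingsService members.

import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;

// Illustrative sketch only: the register/cancel lifecycle of watch keys,
// not the real FileSettingsService implementation.
class WatchKeyLifecycleSketch {
    private final WatchService watchService;
    private WatchKey settingsDirKey; // hypothetical fields standing in for settingsDirWatchKey / configDirWatchKey
    private WatchKey configDirKey;

    WatchKeyLifecycleSketch() throws IOException {
        this.watchService = FileSystems.getDefault().newWatchService();
    }

    void start(Path settingsDir, Path configDir) throws IOException {
        // Each register() call returns a key that stays active until cancelled.
        settingsDirKey = settingsDir.register(watchService, StandardWatchEventKinds.ENTRY_CREATE, StandardWatchEventKinds.ENTRY_MODIFY);
        configDirKey = configDir.register(watchService, StandardWatchEventKinds.ENTRY_CREATE);
    }

    void stop() throws IOException {
        // Cancel every outstanding key exactly once before closing the service,
        // mirroring what the cleanupWatchKeys() helper added in this patch does.
        if (settingsDirKey != null) {
            settingsDirKey.cancel();
            settingsDirKey = null;
        }
        if (configDirKey != null) {
            configDirKey.cancel();
            configDirKey = null;
        }
        watchService.close();
    }
}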
--- .../service/FileSettingsService.java | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 35349b5ad041d..66ec590a1b773 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -62,6 +62,7 @@ public class FileSettingsService extends AbstractLifecycleComponent implements C private volatile FileUpdateState fileUpdateState = null; private volatile WatchKey settingsDirWatchKey = null; + private volatile WatchKey configDirWatchKey = null; private volatile boolean active = false; private volatile boolean initialState = true; @@ -206,6 +207,17 @@ boolean watching() { return this.watchService != null; } + private void cleanupWatchKeys() { + if (settingsDirWatchKey != null) { + settingsDirWatchKey.cancel(); + settingsDirWatchKey = null; + } + if (configDirWatchKey != null) { + configDirWatchKey.cancel(); + configDirWatchKey = null; + } + } + synchronized void startWatcher(ClusterState clusterState, boolean onStartup) { if (watching() || active == false) { refreshExistingFileStateIfNeeded(clusterState); @@ -245,10 +257,11 @@ synchronized void startWatcher(ClusterState clusterState, boolean onStartup) { // We watch the config directory always, even if initially we had an operator directory // it can be deleted and created later. The config directory never goes away, we only // register it once for watching. - enableSettingsWatcher(null, operatorSettingsDir().getParent()); + configDirWatchKey = enableSettingsWatcher(configDirWatchKey, operatorSettingsDir().getParent()); } catch (Exception e) { if (watchService != null) { try { + cleanupWatchKeys(); this.watchService.close(); } catch (Exception ignore) {} finally { this.watchService = null; @@ -322,10 +335,7 @@ synchronized void stopWatcher() { logger.debug("stopping watcher ..."); if (watching()) { try { - if (settingsDirWatchKey != null) { - settingsDirWatchKey.cancel(); - settingsDirWatchKey = null; - } + cleanupWatchKeys(); fileUpdateState = null; watchService.close(); if (watcherThreadLatch != null) { From 63b850cac99d32824750fab36c0c10d5b341e721 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 17 Aug 2022 13:05:47 -0400 Subject: [PATCH 248/265] REST tests for cumulative pipeline aggs (#88966) Adds REST tests for the `cumulative_cardinality` and `cumulative_sum` pipeline aggregations. This gives us forwards and backwards compatibility tests for these aggs as well as mixed version cluster tests for these aggs. 
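As a rough, purely illustrative sketch of the contract these yaml tests assert (plain Java, no Elasticsearch APIs): `cumulative_sum` adds up the targeted metric bucket by bucket, so each date-histogram bucket reports the running total of everything seen so far, while `cumulative_cardinality` instead reports how many distinct values have been seen up to and including that bucket, which is why the two tests expect different totals for the same data.

// Purely illustrative: reproduces the cumulative_sum expectations in the new yaml test.
// Per-day distinct_users from the test data are 2, 3, 3, 2; the pipeline agg should
// report running totals 2, 5, 8, 10. cumulative_cardinality would instead report
// 2, 3, 4, 5, because it counts distinct users seen so far rather than summing counts.
public class CumulativeSumSketch {
    public static void main(String[] args) {
        double[] perBucketMetric = { 2, 3, 3, 2 };
        double running = 0;
        for (int i = 0; i < perBucketMetric.length; i++) {
            running += perBucketMetric[i];
            System.out.println("bucket " + i + " cumulative sum = " + running);
        }
    }
}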
Relates to #26220 --- .../search.aggregation/500_cumulative_sum.yml | 143 ++++++++++++++ .../test/analytics/cumulative_cardinality.yml | 187 ++++++++++++------ 2 files changed, 265 insertions(+), 65 deletions(-) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_cumulative_sum.yml diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_cumulative_sum.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_cumulative_sum.yml new file mode 100644 index 0000000000000..3d8704d49ded8 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_cumulative_sum.yml @@ -0,0 +1,143 @@ +setup: + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2022-01-01T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-01T01:00:00", "user": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-01T02:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-02T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-02T01:00:00", "user": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-02T02:00:00", "user": 3 } + - { "index": { } } + - { "@timestamp": "2022-01-03T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-03T01:00:00", "user": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-03T03:00:00", "user": 4 } + - { "index": { } } + - { "@timestamp": "2022-01-04T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-04T01:00:00", "user": 5 } + +--- +basic: + - do: + search: + index: test + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + calendar_interval: day + aggs: + distinct_users: + cardinality: + field: user + users_sum: + cumulative_sum: + buckets_path: distinct_users + - match: { hits.total.value: 11 } + - length: { aggregations.@timestamp.buckets: 4 } + - match: { aggregations.@timestamp.buckets.0.key_as_string: "2022-01-01T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.0.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.0.users_sum.value: 2 } + - match: { aggregations.@timestamp.buckets.1.key_as_string: "2022-01-02T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.1.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.1.users_sum.value: 5 } + - match: { aggregations.@timestamp.buckets.2.key_as_string: "2022-01-03T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.2.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.2.users_sum.value: 8 } + - match: { aggregations.@timestamp.buckets.3.key_as_string: "2022-01-04T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.3.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.3.users_sum.value: 10 } + +--- +format: + - do: + search: + index: test + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + calendar_interval: day + aggs: + distinct_users: + cardinality: + field: user + users_sum: + cumulative_sum: + buckets_path: distinct_users + format: "00" + - match: { hits.total.value: 11 } + - length: { aggregations.@timestamp.buckets: 4 } + - match: { aggregations.@timestamp.buckets.0.key_as_string: "2022-01-01T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.0.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.0.users_sum.value_as_string: "02" } + 
- match: { aggregations.@timestamp.buckets.1.key_as_string: "2022-01-02T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.1.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.1.users_sum.value_as_string: "05" } + - match: { aggregations.@timestamp.buckets.2.key_as_string: "2022-01-03T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.2.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.2.users_sum.value_as_string: "08" } + - match: { aggregations.@timestamp.buckets.3.key_as_string: "2022-01-04T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.3.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.3.users_sum.value_as_string: "10" } + +--- +no results: + - do: + search: + index: test + body: + size: 0 + query: + match: + missing_field: not_found + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + calendar_interval: day + aggs: + distinct_users: + cardinality: + field: user + users_sum: + cumulative_sum: + buckets_path: distinct_users + - match: { hits.total.value: 0 } + - length: { aggregations.@timestamp.buckets: 0 } + +--- +bad path: + - do: + catch: '/Validation Failed: 1: No aggregation found for path \[missing\];/' + search: + index: test + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + calendar_interval: day + aggs: + distinct_users: + cardinality: + field: user + users_sum: + cumulative_sum: + buckets_path: missing diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/cumulative_cardinality.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/cumulative_cardinality.yml index b59912e86f2a5..dee29ebd34767 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/cumulative_cardinality.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/cumulative_cardinality.yml @@ -1,86 +1,143 @@ setup: - - skip: - features: headers - do: - indices.create: - index: foo - body: - mappings: - properties: - timestamp: - type: date - user: - type: keyword - - - - do: - headers: - Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser bulk: + index: test refresh: true body: - - index: - _index: "foo" - - timestamp: "2017-01-01T05:00:00Z" - user: "a" - - - index: - _index: "foo" - - timestamp: "2017-01-01T05:00:00Z" - user: "b" - - - index: - _index: "foo" - - timestamp: "2017-01-01T05:00:00Z" - user: "c" - - - index: - _index: "foo" - - timestamp: "2017-01-02T05:00:00Z" - user: "a" - - - index: - _index: "foo" - - timestamp: "2017-01-02T05:00:00Z" - user: "b" - - - index: - _index: "foo" - - timestamp: "2017-01-03T05:00:00Z" - user: "d" + - { "index": { } } + - { "@timestamp": "2022-01-01T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-01T01:00:00", "user": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-01T02:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-02T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-02T01:00:00", "user": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-02T02:00:00", "user": 3 } + - { "index": { } } + - { "@timestamp": "2022-01-03T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-03T01:00:00", "user": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-03T03:00:00", "user": 4 } + - { "index": { } } + - { "@timestamp": "2022-01-04T00:00:00", "user": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-04T01:00:00", "user": 5 } --- -"Basic Search": +basic: + - do: + search: + index: test + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + calendar_interval: day + aggs: + distinct_users: + cardinality: + field: user + total_users: + cumulative_cardinality: + buckets_path: distinct_users + - match: { hits.total.value: 11 } + - length: { aggregations.@timestamp.buckets: 4 } + - match: { aggregations.@timestamp.buckets.0.key_as_string: "2022-01-01T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.0.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.0.total_users.value: 2 } + - match: { aggregations.@timestamp.buckets.1.key_as_string: "2022-01-02T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.1.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.1.total_users.value: 3 } + - match: { aggregations.@timestamp.buckets.2.key_as_string: "2022-01-03T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.2.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.2.total_users.value: 4 } + - match: { aggregations.@timestamp.buckets.3.key_as_string: "2022-01-04T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.3.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.3.total_users.value: 5 } +--- +format: - do: search: - index: "foo" + index: test body: size: 0 aggs: - histo: + "@timestamp": date_histogram: - field: "timestamp" - calendar_interval: "day" + field: "@timestamp" + calendar_interval: day aggs: distinct_users: cardinality: - field: "user" + field: user total_users: cumulative_cardinality: - buckets_path: "distinct_users" + buckets_path: distinct_users + format: "00" + - match: { hits.total.value: 11 } + - length: { aggregations.@timestamp.buckets: 4 } + - match: { aggregations.@timestamp.buckets.0.key_as_string: "2022-01-01T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.0.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.0.total_users.value_as_string: "02" } + - match: { aggregations.@timestamp.buckets.1.key_as_string: "2022-01-02T00:00:00.000Z" } + - match: { 
aggregations.@timestamp.buckets.1.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.1.total_users.value_as_string: "03" } + - match: { aggregations.@timestamp.buckets.2.key_as_string: "2022-01-03T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.2.distinct_users.value: 3 } + - match: { aggregations.@timestamp.buckets.2.total_users.value_as_string: "04" } + - match: { aggregations.@timestamp.buckets.3.key_as_string: "2022-01-04T00:00:00.000Z" } + - match: { aggregations.@timestamp.buckets.3.distinct_users.value: 2 } + - match: { aggregations.@timestamp.buckets.3.total_users.value_as_string: "05" } - - length: { aggregations.histo.buckets: 3 } - - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } - - match: { aggregations.histo.buckets.0.doc_count: 3 } - - match: { aggregations.histo.buckets.0.distinct_users.value: 3 } - - match: { aggregations.histo.buckets.0.total_users.value: 3 } - - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } - - match: { aggregations.histo.buckets.1.doc_count: 2 } - - match: { aggregations.histo.buckets.1.distinct_users.value: 2 } - - match: { aggregations.histo.buckets.1.total_users.value: 3 } - - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } - - match: { aggregations.histo.buckets.2.doc_count: 1 } - - match: { aggregations.histo.buckets.2.distinct_users.value: 1 } - - match: { aggregations.histo.buckets.2.total_users.value: 4 } +--- +no results: + - do: + search: + index: test + body: + size: 0 + query: + match: + missing_field: not_found + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + calendar_interval: day + aggs: + distinct_users: + cardinality: + field: user + total_users: + cumulative_cardinality: + buckets_path: distinct_users + - match: { hits.total.value: 0 } + - length: { aggregations.@timestamp.buckets: 0 } +--- +bad path: + - do: + catch: '/Validation Failed: 1: No aggregation found for path \[missing\];/' + search: + index: test + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + calendar_interval: day + aggs: + distinct_users: + cardinality: + field: user + total_users: + cumulative_cardinality: + buckets_path: missing From b46d95b2fbbdfa56f5bb4a185e07cd2586d8cee7 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 17 Aug 2022 13:19:49 -0400 Subject: [PATCH 249/265] REST tests for percentiles_bucket agg (#88029) Adds REST tests for the `percentiles_bucket` pipeline bucket aggregation. This gives us forwards and backwards compatibility tests for these aggs as well as mixed version cluster tests for these aggs. 
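To make the expectations below concrete: `percentiles_bucket` first collects the targeted metric from every bucket of the sibling aggregation and then reports percentiles over that collected list. The sketch here uses a simple nearest-rank rule on the sorted values; that rule happens to reproduce the numbers asserted in the new test (per-bucket averages 1, 2, 1 giving a 50th percentile of 1.0 and a 75th percentile of 2.0), but it is only an approximation of the aggregation's behaviour, not its exact interpolation logic.

import java.util.Arrays;

// Illustrative only: a nearest-rank percentile over the per-bucket metric values,
// matching the numbers asserted in 500_percentiles_bucket.yml while not claiming to be
// the exact rule the percentiles_bucket aggregation implements.
public class PercentilesBucketSketch {
    static double nearestRank(double[] sortedValues, double percent) {
        int index = (int) Math.round((percent / 100.0) * (sortedValues.length - 1));
        return sortedValues[index];
    }

    public static void main(String[] args) {
        double[] perBucketAvg = { 1.0, 2.0, 1.0 }; // avg of "v" per hourly bucket in the test
        double[] sorted = perBucketAvg.clone();
        Arrays.sort(sorted);
        System.out.println("50th percentile = " + nearestRank(sorted, 50)); // 1.0
        System.out.println("75th percentile = " + nearestRank(sorted, 75)); // 2.0
    }
}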
Relates to #26220 --- .../500_percentiles_bucket.yml | 293 ++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_percentiles_bucket.yml diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_percentiles_bucket.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_percentiles_bucket.yml new file mode 100644 index 0000000000000..2133f0b3a0f3a --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/500_percentiles_bucket.yml @@ -0,0 +1,293 @@ +setup: + - do: + bulk: + index: no_gaps + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2022-01-01T00:00:00", "v": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-01T01:00:00", "v": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-01T02:00:00", "v": 1 } + + - do: + bulk: + index: gaps + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2022-01-01T00:00:00", "v": 1 } + - { "index": { } } + - { "@timestamp": "2022-01-01T02:00:00", "v": 2 } + - { "index": { } } + - { "@timestamp": "2022-01-01T03:00:00", "v": 1 } + +--- +basic: + - do: + search: + index: no_gaps + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: {avg: {field: v}} + d: + percentiles_bucket: + buckets_path: "@timestamp>v" + - match: { hits.total.value: 3 } + - length: { aggregations.@timestamp.buckets: 3 } + - match: + aggregations.d.values: + 1.0: 1.0 + 5.0: 1.0 + 25.0: 1.0 + 50.0: 1.0 + 75.0: 2.0 + 95.0: 2.0 + 99.0: 2.0 + +--- +format: + - do: + search: + index: no_gaps + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: {avg: {field: v}} + d: + percentiles_bucket: + buckets_path: "@timestamp>v" + format: "0.00" + - match: { hits.total.value: 3 } + - length: { aggregations.@timestamp.buckets: 3 } + - match: + aggregations.d.values: + 1.0: 1.0 + 5.0: 1.0 + 25.0: 1.0 + 50.0: 1.0 + 75.0: 2.0 + 95.0: 2.0 + 99.0: 2.0 + 1.0_as_string: "1.00" + 5.0_as_string: "1.00" + 50.0_as_string: "1.00" + 25.0_as_string: "1.00" + 75.0_as_string: "2.00" + 95.0_as_string: "2.00" + 99.0_as_string: "2.00" + +--- +gap_policy=skip: + - do: + search: + index: gaps + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: {avg: {field: v}} + d: + percentiles_bucket: + buckets_path: "@timestamp>v" + gap_policy: skip + - match: { hits.total.value: 3 } + - length: { aggregations.@timestamp.buckets: 4 } + - match: + aggregations.d.values: + 1.0: 1.0 + 5.0: 1.0 + 25.0: 1.0 + 50.0: 1.0 + 75.0: 2.0 + 95.0: 2.0 + 99.0: 2.0 + +--- +gap_policy=insert_zeros: + - do: + search: + index: gaps + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: {avg: {field: v}} + d: + percentiles_bucket: + buckets_path: "@timestamp>v" + gap_policy: insert_zeros + - match: { hits.total.value: 3 } + - length: { aggregations.@timestamp.buckets: 4 } + - match: + aggregations.d.values: + 1.0: 0.0 + 5.0: 0.0 + 25.0: 1.0 + 50.0: 1.0 + 75.0: 1.0 + 95.0: 2.0 + 99.0: 2.0 + +--- +gap_policy=keep_value: + - do: + search: + index: gaps + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: {avg: {field: v}} + d: + percentiles_bucket: + buckets_path: "@timestamp>v" + gap_policy: keep_values + - 
match: { hits.total.value: 3 } + - length: { aggregations.@timestamp.buckets: 4 } + - match: + aggregations.d.values: + 1.0: 1.0 + 5.0: 1.0 + 25.0: 1.0 + 50.0: 1.0 + 75.0: 2.0 + 95.0: 2.0 + 99.0: 2.0 + +--- +dotted name: + - do: + search: + index: no_gaps + body: + size: 0 + aggs: + "@time.stamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: {avg: {field: v}} + d: + percentiles_bucket: + buckets_path: "@time.stamp>v" + - match: { hits.total.value: 3 } + - length: { aggregations.@time\.stamp.buckets: 3 } + - match: + aggregations.d.values: + 1.0: 1.0 + 5.0: 1.0 + 25.0: 1.0 + 50.0: 1.0 + 75.0: 2.0 + 95.0: 2.0 + 99.0: 2.0 + +--- +dotted value: + - do: + search: + index: no_gaps + body: + size: 0 + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: + percentiles: + field: v + percents: [ 50, 99.9 ] + d: + percentiles_bucket: + buckets_path: "@timestamp>v[99.9]" + - match: { hits.total.value: 3 } + - length: { aggregations.@timestamp.buckets: 3 } + - match: + aggregations.d.values: + 1.0: 1.0 + 5.0: 1.0 + 25.0: 1.0 + 50.0: 1.0 + 75.0: 2.0 + 95.0: 2.0 + 99.0: 2.0 + +--- +no results: + - do: + search: + index: no_gaps + body: + size: 0 + query: + match: + missing_field: not_found + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + v: {avg: {field: v}} + d: + percentiles_bucket: + buckets_path: "@timestamp>v" + - match: { hits.total.value: 0 } + - length: { aggregations.@timestamp.buckets: 0 } + - match: + aggregations.d.values: + 1.0: null + 5.0: null + 25.0: null + 50.0: null + 75.0: null + 95.0: null + 99.0: null + +--- +bad path: + - do: + catch: '/Validation Failed: 1: No aggregation \[v\] found for path \[@timestamp>v\];/' + search: + index: no_gaps + body: + size: 0 + query: + match: + missing_field: not_found + aggs: + "@timestamp": + date_histogram: + field: "@timestamp" + fixed_interval: 1h + d: + percentiles_bucket: + buckets_path: "@timestamp>v" From 3bde177fea3ed5a80485f841fe8a55b6314423f3 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 17 Aug 2022 15:24:18 -0400 Subject: [PATCH 250/265] Rollover min_* conditions docs and highlight (#89434) --- docs/changelog/83345.yaml | 30 ++++++++ .../ilm/actions/ilm-rollover.asciidoc | 77 ++++++++++++++++--- docs/reference/ilm/index-rollover.asciidoc | 2 +- .../reference/indices/rollover-index.asciidoc | 12 ++- 4 files changed, 107 insertions(+), 14 deletions(-) diff --git a/docs/changelog/83345.yaml b/docs/changelog/83345.yaml index 570dc85b319e2..25e49cd882719 100644 --- a/docs/changelog/83345.yaml +++ b/docs/changelog/83345.yaml @@ -3,3 +3,33 @@ summary: Add min_* conditions to rollover area: ILM+SLM type: enhancement issues: [] +highlight: + title: Minimum conditions for the rollover API and ILM actions + body: |- + The rollover API and ILM actions now support minimum conditions for rollover. + + Minimum conditions prevent rollover from occuring until they are met. That is, an index + will rollover once one or more max conditions are satisfied and all min conditions are satisfied. + + As an example, the following ILM policy would roll an index over if it is at least 7 days old or + at least 100 gigabytes, but only as long as the index is not empty. 
+ + ``` + PUT _ilm/policy/my_policy + { + "policy": { + "phases": { + "hot": { + "actions": { + "rollover" : { + "max_age": "7d", + "max_size": "100gb", + "min_docs": 1 + } + } + } + } + } + } + ``` + notable: true diff --git a/docs/reference/ilm/actions/ilm-rollover.asciidoc b/docs/reference/ilm/actions/ilm-rollover.asciidoc index 980ec54591038..823ba92f29778 100644 --- a/docs/reference/ilm/actions/ilm-rollover.asciidoc +++ b/docs/reference/ilm/actions/ilm-rollover.asciidoc @@ -4,7 +4,8 @@ Phases allowed: hot. -Rolls over a target to a new index when the existing index meets one or more of the rollover conditions. +Rolls over a target to a new index when the existing index satisfies +the specified rollover conditions. IMPORTANT: If the rollover action is used on a <>, policy execution waits until the leader index rolls over (or is @@ -45,8 +46,11 @@ PUT my-index-000001 [[ilm-rollover-options]] ==== Options -You must specify at least one rollover condition. -An empty rollover action is invalid. +A rollover action must specify at least one max_* condition, it may include zero +or more min_* conditions. An empty rollover action is invalid. + +The index will rollover once any max_* condition is satisfied and all +min_* conditions are satisfied. // tag::rollover-conditions[] `max_age`:: @@ -90,6 +94,32 @@ replicas are ignored. + TIP: To see the current shard docs, use the <> API. The `docs` value shows the number of documents each shard. + +`min_age`:: +(Optional, <>) +Prevents rollover until after the minimum elapsed time from index creation is reached. +See notes on `max_age`. + +`min_docs`:: +(Optional, integer) +Prevents rollover until after the specified minimum number of documents is reached. +See notes on `max_docs`. + +`min_size`:: +(Optional, <>) +Prevents rollover until the index reaches a certain size. +See notes on `max_size`. + +`min_primary_shard_size`:: +(Optional, <>) +Prevents rollover until the largest primary shard in the index reaches a certain size. +See notes on `max_primary_shard_size`. + +`min_primary_shard_docs`:: +(Optional, integer) +Prevents rollover until the largest primary shard in the index reaches a certain number of documents. +See notes on `max_primary_shard_docs`. + // end::rollover-conditions[] [[ilm-rollover-ex]] @@ -109,7 +139,7 @@ PUT _ilm/policy/my_policy "hot": { "actions": { "rollover" : { - "max_primary_shard_size": "50GB" + "max_primary_shard_size": "50gb" } } } @@ -132,7 +162,7 @@ PUT _ilm/policy/my_policy "hot": { "actions": { "rollover" : { - "max_size": "100GB" + "max_size": "100gb" } } } @@ -214,8 +244,9 @@ PUT _ilm/policy/my_policy ===== Roll over using multiple conditions When you specify multiple rollover conditions, -the index is rolled over when _any_ of the conditions are met. -This example rolls the index over if it is at least 7 days old or at least 100 gigabytes. +the index is rolled over when _any_ of the max_* and _all_ of the min_* conditions are met. +This example rolls the index over if it is at least 7 days old or at least 100 gigabytes, +but only as long as the index is not empty. 
[source,console] -------------------------------------------------- @@ -227,7 +258,35 @@ PUT _ilm/policy/my_policy "actions": { "rollover" : { "max_age": "7d", - "max_size": "100GB" + "max_size": "100gb", + "min_docs": 1 + } + } + } + } + } +} +-------------------------------------------------- + +[ilm-rollover-conditions-ex]] +===== Roll over while maintaining shard sizes + +This example rolls the index over when the primary shard size is at least 50gb, +or when the index is at least 30 days old, but only as long as a primary shard is at least 1gb. +For low-volume indices, this prevents the creation of many small shards. + +[source,console] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover" : { + "max_primary_shard_size": "50gb", + "max_age": "30d", + "min_primary_shard_size": "1gb" } } } @@ -254,7 +313,7 @@ PUT /_ilm/policy/rollover_policy "hot": { "actions": { "rollover": { - "max_size": "50GB" + "max_size": "50gb" } } }, diff --git a/docs/reference/ilm/index-rollover.asciidoc b/docs/reference/ilm/index-rollover.asciidoc index 9c69ad968041c..3755619a6f15a 100644 --- a/docs/reference/ilm/index-rollover.asciidoc +++ b/docs/reference/ilm/index-rollover.asciidoc @@ -43,7 +43,7 @@ On each rollover, the new index becomes the write index. === Automatic rollover {ilm-init} enables you to automatically roll over to a new index based -on the index size, document count, or age. When a rollover is triggered, a new +on conditions like the index size, document count, or age. When a rollover is triggered, a new index is created, the write alias is updated to point to the new index, and all subsequent updates are written to the new index. diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index d8d67c29a540e..3869f35b560fa 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -111,7 +111,7 @@ include::{es-repo-dir}/indices/create-index.asciidoc[tag=index-name-reqs] `dry_run`:: (Optional, Boolean) -If `true`, checks whether the current index matches one or more specified +If `true`, checks whether the current index satisfies the specified `conditions` but does not perform a rollover. Defaults to `false`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] @@ -132,10 +132,14 @@ include::{es-repo-dir}/indices/create-index.asciidoc[tag=aliases-props] `conditions`:: (Optional, object) Conditions for the rollover. If specified, {es} only performs the rollover if -the current index meets one or more of these conditions. If this parameter is +the current index satisfies these conditions. If this parameter is not specified, {es} performs the rollover unconditionally. + -IMPORTANT: To trigger a rollover, the current index must meet these conditions +If conditions are specified, at least one of them must be a max_* condition. +The index will rollover if any max_* condition is satisfied and all +min_* conditions are satisfied. ++ +IMPORTANT: To trigger a rollover, the current index must satisfy these conditions at the time of the request. {es} does not monitor the index after the API response. To automate rollover, use {ilm-init}'s <> instead. @@ -197,7 +201,7 @@ conditions were specified, this is an empty object. ==== ``:: (Boolean) The key is each condition. The value is its result. If `true`, the -index met the condition at rollover. +index met the condition. 
==== [[rollover-index-api-example]] From 725367e14bce0f0ba48f668f3608861814a6f726 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 18 Aug 2022 12:07:04 +1000 Subject: [PATCH 251/265] User Profile - Detailed errors in hasPrivileges response (#89224) This PR adds a new `errors` field in the ProfilehasPrivileges response to report detailed errors encountered, including missing UIDs. It also removes the existing `errors_uids` field since this is redundant after the change. --- docs/changelog/89224.yaml | 5 ++ .../has-privileges-user-profile.asciidoc | 35 ++++++-- .../user/ProfileHasPrivilegesResponse.java | 28 +++--- .../ProfileHasPrivilegesResponseTests.java | 81 +++++++++++++---- .../xpack/security/profile/ProfileIT.java | 13 ++- .../security/profile/ProfileIntegTests.java | 4 +- .../TransportProfileHasPrivilegesAction.java | 15 ++-- .../security/profile/ProfileService.java | 34 ++----- ...nsportProfileHasPrivilegesActionTests.java | 29 +++--- .../security/profile/ProfileServiceTests.java | 90 ++++++++----------- .../test/user_profile/40_has_privileges.yml | 9 ++ 11 files changed, 203 insertions(+), 140 deletions(-) create mode 100644 docs/changelog/89224.yaml diff --git a/docs/changelog/89224.yaml b/docs/changelog/89224.yaml new file mode 100644 index 0000000000000..8704eb3717aec --- /dev/null +++ b/docs/changelog/89224.yaml @@ -0,0 +1,5 @@ +pr: 89224 +summary: User Profile - Detailed errors in `hasPrivileges` response +area: Security +type: enhancement +issues: [] diff --git a/x-pack/docs/en/rest-api/security/has-privileges-user-profile.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges-user-profile.asciidoc index de0524ea4cac2..3bb4a19787952 100644 --- a/x-pack/docs/en/rest-api/security/has-privileges-user-profile.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges-user-profile.asciidoc @@ -68,14 +68,21 @@ Note that the `privileges` section above is identical to the ==== {api-response-body-title} A successful has privileges user profile API call returns a JSON structure that contains -two list fields: +two fields: `has_privilege_uids`:: (list) The subset of the requested profile IDs of the users that have **all** the requested privileges. -`error_uids`:: (list) The subset of the requested profile IDs for which an error was -encountered. It does **not** include the missing profile IDs or the profile IDs of -the users that do not have all the requested privileges. This field is absent if empty. +`errors`:: (object) Errors encountered while fulfilling the request. This field is absent if there is no error. +It does **not** include the profile IDs of the users that do not have all the requested privileges. ++ +.Properties of objects in `errors` +[%collapsible%open] +==== +`count`:: (number) Total number of errors + +`details`:: (object) The detailed error report with keys being profile IDs and values being the exact errors. 
+==== [[security-api-has-privileges-user-profile-example]] ==== {api-examples-title} @@ -87,7 +94,11 @@ requested set of cluster, index, and application privileges: -------------------------------------------------- POST /_security/user/_has_privileges { - "uids": ["u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0", "u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1"], + "uids": [ + "u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0", + "u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1", + "u_does-not-exist_0" + ], "cluster": [ "monitor", "create_snapshot", "manage_ml" ], "index" : [ { @@ -110,12 +121,22 @@ POST /_security/user/_has_privileges -------------------------------------------------- // TEST[skip:TODO setup and tests will be possible once the profile uid is predictable] -The following example output indicates that only one of the two users has all the privileges: +The following example output indicates that only one of the three users has all the privileges +and one of them is not found: [source,js] -------------------------------------------------- { - "has_privilege_uids": ["u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1"] + "has_privilege_uids": ["u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1"], + "errors": { + "count": 1, + "details": { + "u_does-not-exist_0": { + "type": "resource_not_found_exception", + "reason": "profile document not found" + } + } + } } -------------------------------------------------- // NOTCONSOLE diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java index 1fbe31c6ae9c9..0f2380f1c0fb2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java @@ -12,34 +12,36 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; +import java.util.Map; import java.util.Objects; import java.util.Set; public class ProfileHasPrivilegesResponse extends ActionResponse implements ToXContentObject { private Set hasPrivilegeUids; - private Set errorUids; + private final Map errors; public ProfileHasPrivilegesResponse(StreamInput in) throws IOException { super(in); this.hasPrivilegeUids = in.readSet(StreamInput::readString); - this.errorUids = in.readSet(StreamInput::readString); + this.errors = in.readMap(StreamInput::readString, StreamInput::readException); } - public ProfileHasPrivilegesResponse(Set hasPrivilegeUids, Set errorUids) { + public ProfileHasPrivilegesResponse(Set hasPrivilegeUids, Map errors) { super(); this.hasPrivilegeUids = Objects.requireNonNull(hasPrivilegeUids); - this.errorUids = Objects.requireNonNull(errorUids); + this.errors = Objects.requireNonNull(errors); } public Set hasPrivilegeUids() { return hasPrivilegeUids; } - public Set errorUids() { - return errorUids; + public Map errors() { + return errors; } @Override @@ -47,31 +49,31 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ProfileHasPrivilegesResponse that = (ProfileHasPrivilegesResponse) o; - return 
hasPrivilegeUids.equals(that.hasPrivilegeUids) && errorUids.equals(that.errorUids); + // Only compare the keys (profile uids) of the errors, actual error types do not matter + return hasPrivilegeUids.equals(that.hasPrivilegeUids) && errors.keySet().equals(that.errors.keySet()); } @Override public int hashCode() { - return Objects.hash(hasPrivilegeUids, errorUids); + // Only include the keys (profile uids) of the errors, actual error types do not matter + return Objects.hash(hasPrivilegeUids, errors.keySet()); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { XContentBuilder xContentBuilder = builder.startObject().stringListField("has_privilege_uids", hasPrivilegeUids); - if (false == errorUids.isEmpty()) { - xContentBuilder.stringListField("error_uids", errorUids); - } + XContentUtils.maybeAddErrorDetails(builder, errors); return xContentBuilder.endObject(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(hasPrivilegeUids); - out.writeStringCollection(errorUids); + out.writeMap(errors, StreamOutput::writeString, StreamOutput::writeException); } @Override public String toString() { - return getClass().getSimpleName() + "{" + "has_privilege_uids=" + hasPrivilegeUids + ", error_uids=" + errorUids + "}"; + return getClass().getSimpleName() + "{" + "has_privilege_uids=" + hasPrivilegeUids + ", errors=" + errors + "}"; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/ProfileHasPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/ProfileHasPrivilegesResponseTests.java index 08ad8d06698ff..67e18206cdcb8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/ProfileHasPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/profile/ProfileHasPrivilegesResponseTests.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.core.security.action.profile; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentHelper; @@ -14,15 +16,21 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesResponse; import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TreeMap; +import java.util.function.Supplier; +import java.util.stream.IntStream; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; public class ProfileHasPrivilegesResponseTests extends AbstractWireSerializingTestCase { @@ -35,16 +43,19 @@ protected Writeable.Reader instanceReader() { protected ProfileHasPrivilegesResponse createTestInstance() { return new ProfileHasPrivilegesResponse( randomUnique(() -> randomAlphaOfLengthBetween(0, 5), randomIntBetween(0, 5)), - randomUnique(() -> randomAlphaOfLengthBetween(0, 5), randomIntBetween(0, 5)) + randomErrors() ); } @Override protected ProfileHasPrivilegesResponse mutateInstance(ProfileHasPrivilegesResponse instance) 
throws IOException { return randomFrom( - new ProfileHasPrivilegesResponse(newMutatedSet(instance.hasPrivilegeUids()), instance.errorUids()), - new ProfileHasPrivilegesResponse(instance.hasPrivilegeUids(), newMutatedSet(instance.errorUids())), - new ProfileHasPrivilegesResponse(newMutatedSet(instance.hasPrivilegeUids()), newMutatedSet(instance.errorUids())) + new ProfileHasPrivilegesResponse(newMutatedSet(instance.hasPrivilegeUids()), instance.errors()), + new ProfileHasPrivilegesResponse(instance.hasPrivilegeUids(), randomValueOtherThan(instance.errors(), this::randomErrors)), + new ProfileHasPrivilegesResponse( + newMutatedSet(instance.hasPrivilegeUids()), + randomValueOtherThan(instance.errors(), this::randomErrors) + ) ); } @@ -55,20 +66,47 @@ public void testToXContent() throws IOException { final Map responseMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()) .v2(); - if (response.errorUids().isEmpty()) { + if (response.errors().isEmpty()) { assertThat(responseMap, equalTo(Map.of("has_privilege_uids", new ArrayList<>(response.hasPrivilegeUids())))); } else { - assertThat( - responseMap, - equalTo( - Map.of( - "has_privilege_uids", - new ArrayList<>(response.hasPrivilegeUids()), - "error_uids", - new ArrayList<>(response.errorUids()) - ) - ) - ); + assertThat(responseMap, hasEntry("has_privilege_uids", List.copyOf(response.hasPrivilegeUids()))); + @SuppressWarnings("unchecked") + final Map errorsMap = (Map) responseMap.get("errors"); + assertThat(errorsMap.get("count"), equalTo(response.errors().size())); + @SuppressWarnings("unchecked") + final Map detailsMap = (Map) errorsMap.get("details"); + assertThat(detailsMap.keySet(), equalTo(response.errors().keySet())); + + detailsMap.forEach((k, v) -> { + final String errorString; + final Exception e = response.errors().get(k); + if (e instanceof IllegalArgumentException illegalArgumentException) { + errorString = """ + { + "type": "illegal_argument_exception", + "reason": "%s" + }""".formatted(illegalArgumentException.getMessage()); + } else if (e instanceof ResourceNotFoundException resourceNotFoundException) { + errorString = """ + { + "type": "resource_not_found_exception", + "reason": "%s" + }""".formatted(resourceNotFoundException.getMessage()); + } else if (e instanceof ElasticsearchException elasticsearchException) { + errorString = """ + { + "type": "exception", + "reason": "%s", + "caused_by": { + "type": "illegal_argument_exception", + "reason": "%s" + } + }""".formatted(elasticsearchException.getMessage(), elasticsearchException.getCause().getMessage()); + } else { + throw new IllegalArgumentException("unknown exception type: " + e); + } + assertThat(v, equalTo(XContentHelper.convertToMap(JsonXContent.jsonXContent, errorString, false))); + }); } } @@ -86,4 +124,15 @@ private Set newMutatedSet(Set in) { } return mutated; } + + private Map randomErrors() { + final Map errors = new TreeMap<>(); + final Supplier randomExceptionSupplier = () -> randomFrom( + new IllegalArgumentException(randomAlphaOfLengthBetween(0, 18)), + new ResourceNotFoundException(randomAlphaOfLengthBetween(0, 18)), + new ElasticsearchException(randomAlphaOfLengthBetween(0, 18), new IllegalArgumentException(randomAlphaOfLengthBetween(0, 18))) + ); + IntStream.range(0, randomIntBetween(0, 3)).forEach(i -> errors.put(randomAlphaOfLength(20) + i, randomExceptionSupplier.get())); + return errors; + } } diff --git 
a/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java b/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java index be60db248fede..8644529fb859e 100644 --- a/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java +++ b/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java @@ -115,8 +115,19 @@ public void testProfileHasPrivileges() throws IOException { final Response profileHasPrivilegesResponse = adminClient().performRequest(profileHasPrivilegesRequest); assertOK(profileHasPrivilegesResponse); Map profileHasPrivilegesResponseMap = responseAsMap(profileHasPrivilegesResponse); - assertThat(profileHasPrivilegesResponseMap.keySet(), contains("has_privilege_uids")); + assertThat(profileHasPrivilegesResponseMap.keySet(), contains("has_privilege_uids", "errors")); assertThat(((List) profileHasPrivilegesResponseMap.get("has_privilege_uids")), contains(profileUid)); + assertThat( + profileHasPrivilegesResponseMap.get("errors"), + equalTo( + Map.of( + "count", + 1, + "details", + Map.of("some_missing_profile", Map.of("type", "resource_not_found_exception", "reason", "profile document not found")) + ) + ) + ); } public void testGetProfiles() throws IOException { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index b7ccd0b1698fe..b3d25cd210850 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -586,7 +586,7 @@ public void testProfileAPIsWhenIndexNotCreated() { ) ).actionGet(); assertThat(profileHasPrivilegesResponse.hasPrivilegeUids(), emptyIterable()); - assertThat(profileHasPrivilegesResponse.errorUids(), emptyIterable()); + assertThat(profileHasPrivilegesResponse.errors(), anEmptyMap()); // Ensure index does not exist assertThat(getProfileIndexResponse().getIndices(), not(hasItemInArray(INTERNAL_SECURITY_PROFILE_INDEX_8))); @@ -650,7 +650,7 @@ public void testSetEnabled() { ) ).actionGet(); assertThat(profileHasPrivilegesResponse.hasPrivilegeUids(), emptyIterable()); - assertThat(profileHasPrivilegesResponse.errorUids(), emptyIterable()); + assertThat(profileHasPrivilegesResponse.errors(), anEmptyMap()); // Enable again for search final SetProfileEnabledRequest setProfileEnabledRequest2 = new SetProfileEnabledRequest( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesAction.java index 66360dd4381ae..ebe84fffbd4b4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesAction.java @@ -36,6 +36,7 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import 
java.util.stream.Collectors; @@ -75,20 +76,18 @@ public TransportProfileHasPrivilegesAction( protected void doExecute(Task task, ProfileHasPrivilegesRequest request, ActionListener listener) { assert task instanceof CancellableTask : "task must be cancellable"; profileService.getProfileSubjects(request.profileUids(), ActionListener.wrap(profileSubjectsAndFailures -> { - if (profileSubjectsAndFailures.profileUidToSubject().isEmpty()) { - listener.onResponse(new ProfileHasPrivilegesResponse(Set.of(), profileSubjectsAndFailures.failureProfileUids())); + if (profileSubjectsAndFailures.results().isEmpty()) { + listener.onResponse(new ProfileHasPrivilegesResponse(Set.of(), profileSubjectsAndFailures.errors())); return; } final Set hasPrivilegeProfiles = Collections.synchronizedSet(new HashSet<>()); - final Set errorProfiles = Collections.synchronizedSet(new HashSet<>(profileSubjectsAndFailures.failureProfileUids())); - final Collection> profileUidAndSubjects = profileSubjectsAndFailures.profileUidToSubject() - .entrySet(); - final AtomicInteger counter = new AtomicInteger(profileUidAndSubjects.size()); + final Map errorProfiles = new ConcurrentHashMap<>(profileSubjectsAndFailures.errors()); + final AtomicInteger counter = new AtomicInteger(profileSubjectsAndFailures.results().size()); assert counter.get() > 0; resolveApplicationPrivileges( request, ActionListener.wrap(applicationPrivilegeDescriptors -> threadPool.generic().execute(() -> { - for (Map.Entry profileUidToSubject : profileUidAndSubjects) { + for (Map.Entry profileUidToSubject : profileSubjectsAndFailures.results()) { // return the partial response if the "has privilege" task got cancelled in the meantime if (((CancellableTask) task).isCancelled()) { listener.onFailure(new TaskCancelledException("has privilege task cancelled")); @@ -107,7 +106,7 @@ protected void doExecute(Task task, ProfileHasPrivilegesRequest request, ActionL } }, checkPrivilegesException -> { logger.debug(() -> "Failed to check privileges for profile [" + profileUid + "]", checkPrivilegesException); - errorProfiles.add(profileUid); + errorProfiles.put(profileUid, checkPrivilegesException); }), () -> { if (counter.decrementAndGet() == 0) { listener.onResponse(new ProfileHasPrivilegesResponse(hasPrivilegeProfiles, errorProfiles)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java index 6c13806241081..278c570b04149 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; @@ -150,19 +149,21 @@ public void getProfiles(List uids, Set dataKeys, ActionListener< })); } - public void getProfileSubjects(Collection uids, ActionListener listener) { + public void getProfileSubjects(Collection uids, ActionListener>> listener) { getVersionedDocuments(uids, listener.map(resultsAndErrors -> { if (resultsAndErrors != null) { - return new MultiProfileSubjectResponse( + // convert the list of profile document to a list of "uid to 
subject" entries + return new ResultsAndErrors<>( resultsAndErrors.results() .stream() .map(VersionedDocument::doc) .filter(ProfileDocument::enabled) - .collect(Collectors.toMap(ProfileDocument::uid, profileDoc -> profileDoc.user().toSubject())), - Set.copyOf(errorUidsExcludingNotFound(resultsAndErrors.errors())) + .map(doc -> Map.entry(doc.uid(), doc.user().toSubject())) + .toList(), + resultsAndErrors.errors() ); } else { - return new MultiProfileSubjectResponse(Map.of(), Set.of()); + return new ResultsAndErrors<>(List.of(), Map.of()); } })); } @@ -386,6 +387,7 @@ private void getVersionedDocuments(Collection uids, ActionListener uids, ActionListener resultsAndErrors = new ResultsAndErrors<>(retrievedDocs, errors); - if (logger.isDebugEnabled() && false == resultsAndErrors.errors().isEmpty()) { - Exception loggedException = null; - final List errorUids = errorUidsExcludingNotFound(resultsAndErrors.errors()); - for (String uid : errorUids) { - loggedException = ExceptionsHelper.useOrSuppress(loggedException, resultsAndErrors.errors().get(uid)); - } - if (loggedException != null) { - logger.debug(() -> format("Failed to retrieve profiles %s", errorUids), loggedException); - } - } listener.onResponse(resultsAndErrors); }, listener::onFailure)) ); @@ -839,14 +831,6 @@ private static ProfileDocument updateWithSubject(ProfileDocument doc, Subject su ); } - private static List errorUidsExcludingNotFound(Map errors) { - return errors.entrySet() - .stream() - .filter(entry -> entry.getValue() != null && false == entry.getValue() instanceof ResourceNotFoundException) - .map(Map.Entry::getKey) - .toList(); - } - // Package private for testing record VersionedDocument(ProfileDocument doc, long primaryTerm, long seqNo) { @@ -874,6 +858,4 @@ Profile toProfile(Set dataKeys) { } } - - public record MultiProfileSubjectResponse(Map profileUidToSubject, Set failureProfileUids) {} } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesActionTests.java index 1e5feb22b4697..ed01d0ca6fc40 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/profile/TransportProfileHasPrivilegesActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.common.ResultsAndErrors; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesResponse; @@ -32,7 +33,6 @@ import org.elasticsearch.xpack.security.authz.AuthorizationService; import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; import org.elasticsearch.xpack.security.profile.ProfileService; -import org.elasticsearch.xpack.security.profile.ProfileService.MultiProfileSubjectResponse; import org.junit.After; import org.junit.Before; @@ -45,6 +45,8 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; import static 
org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.elasticsearch.xpack.core.security.action.profile.ProfileHasPrivilegesRequestTests.randomValidPrivilegesToCheckRequest; @@ -53,6 +55,7 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -117,9 +120,8 @@ public void testMultipleConcurrentCheckPrivileges() throws Exception { for (String uid : uidsArg) { profileUidToSubject.put(uid, new Subject(new User("user_for_profile_" + uid), mock(Authentication.RealmRef.class))); } - final ActionListener listener = (ActionListener) invocation - .getArguments()[1]; - listener.onResponse(new MultiProfileSubjectResponse(profileUidToSubject, Set.of())); + final var listener = (ActionListener>>) invocation.getArguments()[1]; + listener.onResponse(new ResultsAndErrors<>(profileUidToSubject.entrySet(), Map.of())); return null; }).when(profileService).getProfileSubjects(anyCollection(), anyActionListener()); @@ -152,7 +154,7 @@ public void testMultipleConcurrentCheckPrivileges() throws Exception { transportProfileHasPrivilegesAction.doExecute(mock(CancellableTask.class), request, listener); ProfileHasPrivilegesResponse response = listener.get(); - assertThat(response.errorUids(), is(errorProfileUids)); + assertThat(response.errors().keySet(), equalTo(errorProfileUids)); Set hasPrivilegeUids = new HashSet<>(allProfileUids); hasPrivilegeUids.removeAll(errorProfileUids); hasPrivilegeUids.removeAll(noPrivilegesProfileUids); @@ -170,9 +172,13 @@ public void testNoProfileSubjectsFound() throws Exception { ); doAnswer(invocation -> { - final ActionListener listener = (ActionListener) invocation - .getArguments()[1]; - listener.onResponse(new MultiProfileSubjectResponse(Map.of(), errorProfileUids)); + final var listener = (ActionListener>>) invocation.getArguments()[1]; + listener.onResponse( + new ResultsAndErrors<>( + List.of(), + errorProfileUids.stream().collect(Collectors.toMap(Function.identity(), uid -> mock(Exception.class))) + ) + ); return null; }).when(profileService).getProfileSubjects(anyCollection(), anyActionListener()); @@ -195,7 +201,7 @@ public void testNoProfileSubjectsFound() throws Exception { ProfileHasPrivilegesResponse response = listener.get(); assertThat(response.hasPrivilegeUids(), emptyIterable()); - assertThat(response.errorUids(), is(errorProfileUids)); + assertThat(response.errors().keySet(), equalTo(errorProfileUids)); } public void testDLSQueryIndicesPrivilegesRequestValidation() { @@ -236,9 +242,8 @@ public void testCancellation() throws Exception { for (String uid : uidsArg) { profileUidToSubject.put(uid, new Subject(new User("user_for_profile_" + uid), mock(Authentication.RealmRef.class))); } - final ActionListener listener = (ActionListener) invocation - .getArguments()[1]; - listener.onResponse(new MultiProfileSubjectResponse(profileUidToSubject, Set.of())); + final var listener = (ActionListener>>) invocation.getArguments()[1]; + listener.onResponse(new ResultsAndErrors<>(profileUidToSubject.entrySet(), Map.of())); return null; }).when(profileService).getProfileSubjects(anyCollection(), anyActionListener()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java index 5595f2550c1a6..d662aa723a50b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java @@ -7,10 +7,8 @@ package org.elasticsearch.xpack.security.profile; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkAction; @@ -37,9 +35,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -52,7 +50,6 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; @@ -87,7 +84,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -102,7 +98,6 @@ import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ALIAS; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -271,29 +266,29 @@ public void testGetProfilesEmptyUids() { @SuppressWarnings("unchecked") public void testGetProfileSubjectsNoIndex() throws Exception { when(profileIndex.indexExists()).thenReturn(false); - PlainActionFuture future = new PlainActionFuture<>(); + PlainActionFuture>> future = new PlainActionFuture<>(); profileService.getProfileSubjects(randomList(1, 5, () -> randomAlphaOfLength(20)), future); - ProfileService.MultiProfileSubjectResponse multiProfileSubjectResponse = future.get(); - assertThat(multiProfileSubjectResponse.profileUidToSubject().size(), is(0)); - assertThat(multiProfileSubjectResponse.failureProfileUids().size(), is(0)); + ResultsAndErrors> resultsAndErrors = future.get(); + assertThat(resultsAndErrors.results().size(), is(0)); + assertThat(resultsAndErrors.errors().size(), is(0)); when(profileIndex.indexExists()).thenReturn(true); ElasticsearchException unavailableException = new ElasticsearchException("mock profile index unavailable"); when(profileIndex.isAvailable()).thenReturn(false); when(profileIndex.getUnavailableReason()).thenReturn(unavailableException); - PlainActionFuture future2 = new PlainActionFuture<>(); + PlainActionFuture>> future2 = new PlainActionFuture<>(); 
profileService.getProfileSubjects(randomList(1, 5, () -> randomAlphaOfLength(20)), future2); ExecutionException e = expectThrows(ExecutionException.class, () -> future2.get()); assertThat(e.getCause(), is(unavailableException)); - PlainActionFuture future3 = new PlainActionFuture<>(); + PlainActionFuture>> future3 = new PlainActionFuture<>(); profileService.getProfileSubjects(List.of(), future3); - multiProfileSubjectResponse = future3.get(); - assertThat(multiProfileSubjectResponse.profileUidToSubject().size(), is(0)); - assertThat(multiProfileSubjectResponse.failureProfileUids().size(), is(0)); + resultsAndErrors = future3.get(); + assertThat(resultsAndErrors.results().size(), is(0)); + assertThat(resultsAndErrors.errors().size(), is(0)); verify(profileIndex, never()).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class)); } @SuppressWarnings("unchecked") - public void testGetProfileSubjectsWithMissingNoFailures() throws Exception { + public void testGetProfileSubjectsWithMissingUids() throws Exception { final Collection allProfileUids = randomList(1, 5, () -> randomAlphaOfLength(20)); final Collection missingProfileUids = randomSubsetOf(allProfileUids); doAnswer(invocation -> { @@ -338,14 +333,19 @@ public void testGetProfileSubjectsWithMissingNoFailures() throws Exception { return null; }).when(client).execute(eq(MultiGetAction.INSTANCE), any(MultiGetRequest.class), anyActionListener()); - final PlainActionFuture future = new PlainActionFuture<>(); + final PlainActionFuture>> future = new PlainActionFuture<>(); profileService.getProfileSubjects(allProfileUids, future); - ProfileService.MultiProfileSubjectResponse multiProfileSubjectResponse = future.get(); + ResultsAndErrors> resultsAndErrors = future.get(); verify(profileIndex).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class)); - assertThat(multiProfileSubjectResponse.failureProfileUids().isEmpty(), is(true)); - assertThat(multiProfileSubjectResponse.profileUidToSubject().size(), is(allProfileUids.size() - missingProfileUids.size())); - for (Map.Entry profileIdAndSubject : multiProfileSubjectResponse.profileUidToSubject().entrySet()) { + assertThat(resultsAndErrors.errors().size(), equalTo(missingProfileUids.size())); + resultsAndErrors.errors().forEach((uid, e) -> { + assertThat(missingProfileUids, hasItem(uid)); + assertThat(e, instanceOf(ResourceNotFoundException.class)); + }); + + assertThat(resultsAndErrors.results().size(), is(allProfileUids.size() - missingProfileUids.size())); + for (Map.Entry profileIdAndSubject : resultsAndErrors.results()) { assertThat(allProfileUids, hasItem(profileIdAndSubject.getKey())); assertThat(missingProfileUids, not(hasItem(profileIdAndSubject.getKey()))); assertThat(profileIdAndSubject.getValue().getUser().principal(), is("foo_username_" + profileIdAndSubject.getKey())); @@ -366,29 +366,13 @@ public void testGetProfileSubjectWithFailures() throws Exception { listener.onFailure(mGetException); return null; }).when(client).execute(eq(MultiGetAction.INSTANCE), any(MultiGetRequest.class), anyActionListener()); - final PlainActionFuture future = new PlainActionFuture<>(); + final PlainActionFuture>> future = new PlainActionFuture<>(); profileService.getProfileSubjects(randomList(1, 5, () -> randomAlphaOfLength(20)), future); ExecutionException e = expectThrows(ExecutionException.class, () -> future.get()); assertThat(e.getCause(), is(mGetException)); - final Collection missingProfileUids = randomList(1, 5, () -> randomAlphaOfLength(20)); - final Collection 
errorProfileUids = randomSubsetOf(missingProfileUids); - final MockLogAppender mockLogAppender = new MockLogAppender(); - if (false == errorProfileUids.isEmpty()) { - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "message", - "org.elasticsearch.xpack.security.profile.ProfileService", - Level.DEBUG, - "Failed to retrieve profiles " - + missingProfileUids.stream() - .filter(v -> errorProfileUids.contains(v)) - .collect(Collectors.toCollection(TreeSet::new)) - ) - ); - } - mockLogAppender.start(); - final Logger logger = LogManager.getLogger(ProfileService.class); - Loggers.setLevel(logger, Level.DEBUG); + final Collection allProfileUids = randomList(1, 5, () -> randomAlphaOfLength(20)); + final Collection errorProfileUids = randomSubsetOf(allProfileUids); + final Collection missingProfileUids = Sets.difference(Set.copyOf(allProfileUids), Set.copyOf(errorProfileUids)); doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), @@ -416,19 +400,15 @@ public void testGetProfileSubjectWithFailures() throws Exception { return null; }).when(client).execute(eq(MultiGetAction.INSTANCE), any(MultiGetRequest.class), anyActionListener()); - try { - Loggers.addAppender(logger, mockLogAppender); - final PlainActionFuture future2 = new PlainActionFuture<>(); - profileService.getProfileSubjects(missingProfileUids, future2); - - ProfileService.MultiProfileSubjectResponse multiProfileSubjectResponse = future2.get(); - assertThat(multiProfileSubjectResponse.profileUidToSubject().isEmpty(), is(true)); - assertThat(multiProfileSubjectResponse.failureProfileUids(), containsInAnyOrder(errorProfileUids.toArray(String[]::new))); - mockLogAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockLogAppender); - mockLogAppender.stop(); - } + final PlainActionFuture>> future2 = new PlainActionFuture<>(); + profileService.getProfileSubjects(allProfileUids, future2); + + ResultsAndErrors> resultsAndErrors = future2.get(); + assertThat(resultsAndErrors.results().isEmpty(), is(true)); + assertThat(resultsAndErrors.errors().size(), equalTo(allProfileUids.size())); + assertThat(resultsAndErrors.errors().keySet(), equalTo(Set.copyOf(allProfileUids))); + missingProfileUids.forEach(uid -> assertThat(resultsAndErrors.errors().get(uid), instanceOf(ResourceNotFoundException.class))); + errorProfileUids.forEach(uid -> assertThat(resultsAndErrors.errors().get(uid), instanceOf(ElasticsearchException.class))); } public void testActivateProfileShouldFailIfSubjectTypeIsNotUser() { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/40_has_privileges.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/40_has_privileges.yml index 5f017c223243f..24ef7fdd10478 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/40_has_privileges.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/user_profile/40_has_privileges.yml @@ -137,6 +137,10 @@ teardown: - length: { has_privilege_uids: 2 } - match: { has_privilege_uids.0 : "/^(${profile_uid1}|${profile_uid2})$/" } - match: { has_privilege_uids.1 : "/^(${profile_uid1}|${profile_uid2})$/" } + - match: { errors.count: 1 } + - length: { errors.details: 1 } + - is_true: errors.details.dummy_missing + - match: { errors.details.dummy_missing.type: "resource_not_found_exception" } - do: security.has_privileges_user_profile: @@ -153,6 +157,7 @@ teardown: - all - read - length: { 
"has_privilege_uids": 0 } + - is_false: errors - do: security.has_privileges_user_profile: @@ -174,3 +179,7 @@ teardown: - read - length: { "has_privilege_uids": 1 } - match: { "has_privilege_uids.0" : $profile_uid1 } + - match: { errors.count: 1 } + - length: { errors.details: 1 } + - is_true: errors.details.dummy + - match: { errors.details.dummy.type: "resource_not_found_exception" } From 3bb13e245ebcfb2032428cd8618c90967de2550f Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 18 Aug 2022 13:04:43 +1000 Subject: [PATCH 252/265] Return 400 error for GetUserPrivileges call with API keys (#89333) The GetUserPrivileges API returns a 500 error when it is called with an API key that has assigned role descriptors. This is because the underlying LimitedRole class that represents the API key's effective privileges does not support building a simple view of the privileges. This PR changes the code to return 400 error instead of 500 along with a better error message that suggests the GetApiKey API as an alternative. Relates: #89058 --- docs/changelog/89333.yaml | 5 ++ .../xpack/security/apikey/ApiKeyRestIT.java | 83 +++++++++++++++++++ .../xpack/security/authz/RBACEngine.java | 15 +++- .../xpack/security/authz/RBACEngineTests.java | 38 +++++++++ 4 files changed, 140 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/89333.yaml diff --git a/docs/changelog/89333.yaml b/docs/changelog/89333.yaml new file mode 100644 index 0000000000000..72ff86e5fa742 --- /dev/null +++ b/docs/changelog/89333.yaml @@ -0,0 +1,5 @@ +pr: 89333 +summary: Return 400 error for `GetUserPrivileges` call with API keys +area: Security +type: enhancement +issues: [] diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index ebc35a9b5a11d..880a4dcade3f0 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -12,10 +12,12 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -490,6 +492,87 @@ public void testGrantorCannotUpdateApiKeyOfGrantTarget() throws IOException { ); } + public void testGetPrivilegesForApiKeyWorksIfItDoesNotHaveAssignedPrivileges() throws IOException { + final Request createApiKeyRequest = new Request("POST", "_security/api_key"); + if (randomBoolean()) { + createApiKeyRequest.setJsonEntity(""" + { "name": "k1" }"""); + } else { + createApiKeyRequest.setJsonEntity(""" + { + "name": "k1", + "role_descriptors": { } + }"""); + } + final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest); + assertOK(createApiKeyResponse); + + final Request 
getPrivilegesRequest = new Request("GET", "_security/user/_privileges"); + getPrivilegesRequest.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + responseAsMap(createApiKeyResponse).get("encoded")) + ); + final Response getPrivilegesResponse = client().performRequest(getPrivilegesRequest); + assertOK(getPrivilegesResponse); + + assertThat(responseAsMap(getPrivilegesResponse), equalTo(XContentHelper.convertToMap(JsonXContent.jsonXContent, """ + { + "cluster": [ + "all" + ], + "global": [], + "indices": [ + { + "names": [ + "*" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": true + } + ], + "applications": [ + { + "application": "*", + "privileges": [ + "*" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [ + "*" + ] + }""", false))); + } + + public void testGetPrivilegesForApiKeyThrows400IfItHasAssignedPrivileges() throws IOException { + final Request createApiKeyRequest = new Request("POST", "_security/api_key"); + createApiKeyRequest.setJsonEntity(""" + { + "name": "k1", + "role_descriptors": { "a": { "cluster": ["monitor"] } } + }"""); + final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest); + assertOK(createApiKeyResponse); + + final Request getPrivilegesRequest = new Request("GET", "_security/user/_privileges"); + getPrivilegesRequest.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + responseAsMap(createApiKeyResponse).get("encoded")) + ); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(getPrivilegesRequest)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat( + e.getMessage(), + containsString( + "Cannot retrieve privileges for API keys with assigned role descriptors. " + + "Please use the Get API key information API https://ela.st/es-api-get-api-key" + ) + ); + } + private void doTestAuthenticationWithApiKey(final String apiKeyName, final String apiKeyId, final String apiKeyEncoded) throws IOException { final var authenticateRequest = new Request("GET", "_security/_authenticate"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 839d3a5437c39..e4c2c050d3d45 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -658,7 +658,20 @@ public void getUserPrivileges(AuthorizationInfo authorizationInfo, ActionListene ); } else { final Role role = ((RBACAuthorizationInfo) authorizationInfo).getRole(); - listener.onResponse(buildUserPrivilegesResponseObject(role)); + final GetUserPrivilegesResponse getUserPrivilegesResponse; + try { + getUserPrivilegesResponse = buildUserPrivilegesResponseObject(role); + } catch (UnsupportedOperationException e) { + listener.onFailure( + new IllegalArgumentException( + "Cannot retrieve privileges for API keys with assigned role descriptors. 
" + + "Please use the Get API key information API https://ela.st/es-api-get-api-key", + e + ) + ); + return; + } + listener.onResponse(getUserPrivilegesResponse); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index ec4a6c7e6d0f0..d2376a0172023 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -58,10 +58,14 @@ import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.PrivilegesToCheck; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.ApplicationPermission; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.permission.RunAsPermission; import org.elasticsearch.xpack.core.security.authz.permission.SimpleRole; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; @@ -105,6 +109,7 @@ import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; @@ -1505,6 +1510,39 @@ public void testLazinessForAuthorizedIndicesSet() { verify(supplier, never()).get(); } + public void testGetUserPrivilegesThrowsIaeForUnsupportedOperation() { + final RBACAuthorizationInfo authorizationInfo = mock(RBACAuthorizationInfo.class); + final Role role = mock(Role.class); + when(authorizationInfo.getRole()).thenReturn(role); + when(role.cluster()).thenReturn(ClusterPermission.NONE); + when(role.indices()).thenReturn(IndicesPermission.NONE); + when(role.application()).thenReturn(ApplicationPermission.NONE); + when(role.runAs()).thenReturn(RunAsPermission.NONE); + + final UnsupportedOperationException unsupportedOperationException = new UnsupportedOperationException(); + switch (randomIntBetween(0, 3)) { + case 0 -> when(role.cluster()).thenThrow(unsupportedOperationException); + case 1 -> when(role.indices()).thenThrow(unsupportedOperationException); + case 2 -> when(role.application()).thenThrow(unsupportedOperationException); + case 3 -> when(role.runAs()).thenThrow(unsupportedOperationException); + default -> throw new IllegalStateException("unknown case number"); + } + + final PlainActionFuture future = new PlainActionFuture<>(); + engine.getUserPrivileges(authorizationInfo, future); + + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, future::actionGet); + + assertThat( + 
e.getMessage(), + equalTo( + "Cannot retrieve privileges for API keys with assigned role descriptors. " + + "Please use the Get API key information API https://ela.st/es-api-get-api-key" + ) + ); + assertThat(e.getCause(), sameInstance(unsupportedOperationException)); + } + private GetUserPrivilegesResponse.Indices findIndexPrivilege(Set indices, String name) { return indices.stream().filter(i -> i.getIndices().contains(name)).findFirst().get(); } From 1403ab318b14ad39030c6824237e1477a9392dd8 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Thu, 18 Aug 2022 14:10:15 +0200 Subject: [PATCH 253/265] Remove redundant cluster upgrade tests for auth tokens (#89417) This PR removes YAML cluster upgrade tests for token backwards compatibility. The Java REST test suite TokenBackwardsCompatibilityIT fully covers all scenarios included in the YAML tests already. Due to the complex test setup, and since this particular YAML test suite is not meant to be tested with different clients, Java REST tests are more appropriate. The test suite we are removing is resource intensive, both in terms of build times and maintenance, and as such not justified as a redundant suite. Relates: #77350 --- .../test/mixed_cluster/50_token_auth.yml | 180 ------------------ .../test/old_cluster/50_token_auth.yml | 130 ------------- .../test/upgraded_cluster/50_token_auth.yml | 92 --------- 3 files changed, 402 deletions(-) delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml deleted file mode 100644 index 9fac41b569048..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml +++ /dev/null @@ -1,180 +0,0 @@ ---- -"Get the indexed token and use if to authenticate": - - skip: - features: headers - - - do: - cluster.health: - wait_for_status: yellow - - - do: - get: - index: token_index - id: "6" - - - match: { _index: token_index } - - match: { _id: "6" } - - is_true: _source.token - - set: { _source.token : token } - - - do: - headers: - Authorization: Bearer ${token} - security.authenticate: {} - - - match: { username: "token_user" } - - match: { roles.0: "superuser" } - - match: { full_name: "Token User" } - - # call three times because the client rotates the nodes - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - ---- -"Get the indexed refreshed access token and use if to authenticate": - - skip: - features: headers - - - do: - get: - index: token_index - id: "7" - - - match: { _index: token_index } - - match: { _id: "7" } - - is_true: _source.token - - set: { _source.token : token } - - - do: - headers: - Authorization: Bearer ${token} - security.authenticate: {} - - - match: { 
username: "token_user" } - - match: { roles.0: "superuser" } - - match: { full_name: "Token User" } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - ---- -"Get the indexed refresh token and use it to get another access token and authenticate": - - skip: - features: headers - - - do: - get: - index: token_index - id: "8" - - - match: { _index: token_index } - - match: { _id: "8" } - - is_true: _source.token - - set: { _source.token : refresh_token } - - - do: - security.get_token: - body: - grant_type: "refresh_token" - refresh_token: "${refresh_token}" - - - match: { type: "Bearer" } - - is_true: access_token - - set: { access_token: token } - - is_true: refresh_token - - set: { refresh_token: refresh_token } - - match: { expires_in: 3600 } - - is_false: scope - - - do: - headers: - Authorization: Bearer ${token} - security.authenticate: {} - - - match: { username: "token_user" } - - match: { roles.0: "superuser" } - - match: { full_name: "Token User" } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - # overwrite the used refresh token with the new one - - do: - headers: - Authorization: Bearer ${token} - index: - index: token_index - id: "8" - body: { "token" : "${refresh_token}"} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml deleted file mode 100644 index 2f44c37a37f98..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml +++ /dev/null @@ -1,130 +0,0 @@ ---- -"Create a token and reuse it across the upgrade": - - skip: - features: headers - - - do: - cluster.health: - wait_for_status: yellow - - - do: - security.put_user: - username: "token_user" - body: > - { - "password" : "x-pack-test-password", - "roles" : [ "superuser" ], - "full_name" : "Token User" - } - - - do: - security.get_token: - body: - grant_type: "password" - username: "token_user" - password: "x-pack-test-password" - - - match: { type: "Bearer" } - - is_true: access_token - - set: { access_token: token } - - is_true: refresh_token - - set: { refresh_token: refresh_token } - - match: { expires_in: 3600 } - - is_false: scope - - - do: - headers: - Authorization: Bearer ${token} - security.authenticate: {} - - - match: { username: "token_user" } - - match: { roles.0: "superuser" } - - match: { full_name: "Token User" } - - - do: - indices.create: - index: token_index - wait_for_active_shards : all - body: - settings: - index: - number_of_replicas: 1 - - - do: - headers: - Authorization: Bearer ${token} - bulk: - refresh: true - body: - - '{"index": {"_index": "token_index", "_id" : "1"}}' - - '{"f1": "v1_old", "f2": 0}' - - '{"index": 
{"_index": "token_index", "_id" : "2"}}' - - '{"f1": "v2_old", "f2": 1}' - - '{"index": {"_index": "token_index", "_id" : "3"}}' - - '{"f1": "v3_old", "f2": 2}' - - '{"index": {"_index": "token_index", "_id" : "4"}}' - - '{"f1": "v4_old", "f2": 3}' - - '{"index": {"_index": "token_index", "_id" : "5"}}' - - '{"f1": "v5_old", "f2": 4}' - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 5 } - - # we do store the token in the index such that we can reuse it down the road once - # the cluster is upgraded - - do: - headers: - Authorization: Bearer ${token} - index: - index: token_index - id: "6" - body: { "token" : "${token}"} - - # refresh token and store it as well - - do: - security.get_token: - body: - grant_type: "refresh_token" - refresh_token: "${refresh_token}" - - - match: { type: "Bearer" } - - is_true: access_token - - set: { access_token: refreshed_access_token } - - is_true: refresh_token - - set: { refresh_token: refreshed_refresh_token } - - match: { expires_in: 3600 } - - is_false: scope - - # test refresh token (use it) - - do: - headers: - Authorization: Bearer ${refreshed_access_token} - security.authenticate: {} - - - match: { username: "token_user" } - - match: { roles.0: "superuser" } - - match: { full_name: "Token User" } - - # store the new refreshed access token - - do: - headers: - Authorization: Bearer ${refreshed_access_token} - index: - index: token_index - id: "7" - body: { "token" : "${refreshed_access_token}"} - - # store the refresh token - - do: - headers: - Authorization: Bearer ${refreshed_access_token} - index: - index: token_index - id: "8" - body: { "token" : "${refreshed_refresh_token}"} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml deleted file mode 100644 index 2ca983fc031bc..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml +++ /dev/null @@ -1,92 +0,0 @@ ---- -"Get the indexed token and use if to authenticate": - - skip: - features: headers - - - do: - cluster.health: - wait_for_status: yellow - - - do: - get: - index: token_index - id: "6" - - - match: { _index: token_index } - - match: { _id: "6" } - - is_true: _source.token - - set: { _source.token : token } - - - do: - headers: - Authorization: Bearer ${token} - security.authenticate: {} - - - match: { username: "token_user" } - - match: { roles.0: "superuser" } - - match: { full_name: "Token User" } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } - - # counter example that we are really checking this - - do: - headers: - Authorization: Bearer boom - catch: /unable to authenticate with provided credentials and anonymous access is not allowed for this request/ - search: - rest_total_hits_as_int: true - index: token_index - ---- -"Get the indexed refresh token and use if to get another access token and authenticate": - - skip: - features: headers - - - do: - get: - index: token_index - id: "8" - - - match: { _index: token_index } - - match: { _id: "8" } - - is_true: _source.token - - set: { _source.token : refresh_token } - - - do: - security.get_token: - body: - grant_type: "refresh_token" - refresh_token: "${refresh_token}" - - - match: { type: "Bearer" } - - is_true: 
access_token - - set: { access_token: token } - - is_true: refresh_token - - set: { refresh_token: refresh_token } - - match: { expires_in: 3600 } - - is_false: scope - - - do: - headers: - Authorization: Bearer ${token} - security.authenticate: {} - - - match: { username: "token_user" } - - match: { roles.0: "superuser" } - - match: { full_name: "Token User" } - - - do: - headers: - Authorization: Bearer ${token} - search: - rest_total_hits_as_int: true - index: token_index - - - match: { hits.total: 8 } From c238aa1b4645b832888d06e759f9212d7c4d9eda Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 18 Aug 2022 13:50:13 +0100 Subject: [PATCH 254/265] Add YAML spec docs about matching errors (#89370) It's not obvious that a YAML test with a `catch` stanza also permits `match` blocks to assert things about the structure of the error response, but this structure may be an important part of the API spec. This commit adds this info to the docs about YAML tests. --- .../resources/rest-api-spec/test/README.asciidoc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc index 0544ec2bf8a61..f2f04e219afdf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc @@ -320,8 +320,15 @@ be caught and tested. For instance: catch: missing get: index: test - type: test + type: test id: 1 + +# And, optionally, you can assert on the contents of the precise contents of the error message: + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "The request contained an illegal argument" } + - match: { error.caused_by.reason: "The argument was illegal because ..." } + - match: { error.root_cause.0.type: "illegal_argument_exception" } .... The argument to `catch` can be any of: From 18328b014f957c7cf7d4846b1b16b1feed4052e5 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 18 Aug 2022 14:09:09 +0100 Subject: [PATCH 255/265] Remove LegacyClusterTaskResultActionListener (#89459) Also removes the now-unused legacy method `ClusterStateTaskListener#onClusterStateProcessed`. 
Closes #83784 --- .../indices/create/AutoCreateAction.java | 5 --- .../rollover/TransportRolloverAction.java | 6 --- .../cluster/ClusterStateTaskExecutor.java | 37 ++-------------- .../cluster/ClusterStateTaskListener.java | 16 ------- .../cluster/ClusterStateUpdateTask.java | 24 ++++------ .../cluster/LocalMasterServiceTask.java | 5 --- .../action/shard/ShardStateAction.java | 11 ----- .../cluster/coordination/JoinTask.java | 6 --- .../NodeRemovalClusterStateTaskExecutor.java | 5 --- .../metadata/MetadataIndexStateService.java | 28 ------------ .../MetadataIndexTemplateService.java | 5 --- .../MetadataUpdateSettingsService.java | 5 --- .../metadata/HealthMetadataService.java | 5 --- .../elasticsearch/ingest/IngestService.java | 5 --- .../snapshots/SnapshotsService.java | 5 --- .../coordination/CoordinatorTests.java | 7 ++- ...etadataIndexStateServiceBatchingTests.java | 7 --- .../cluster/service/MasterServiceTests.java | 44 ------------------- .../AbstractCoordinatorTestCase.java | 10 +++-- .../license/StartBasicClusterTask.java | 5 --- .../license/StartTrialClusterTask.java | 5 --- .../IndexLifecycleClusterStateUpdateTask.java | 6 +-- .../xpack/ilm/IndexLifecycleRunner.java | 2 +- .../rollup/v2/TransportRollupAction.java | 5 --- 24 files changed, 26 insertions(+), 233 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 94611ea9ec9ed..9f3dc1d4a4824 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -161,11 +161,6 @@ public void onFailure(Exception e) { listener.onFailure(e); } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "should not be called"; - } - private ClusterStateAckListener getAckListener(String indexName) { return new ClusterStateAckListener() { @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index cd11ecfbd30d3..c5d6ce67f24d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -241,16 +241,10 @@ record RolloverTask( RolloverResponse trialRolloverResponse, ActionListener listener ) implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } record RolloverExecutor( diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index 69a54e5068a5b..02192b01991ce 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -73,23 +73,6 @@ default String describeTasks(List tasks) { return output.toString(); } - /** - * A {@link Consumer} for passing to {@link ClusterStateTaskExecutor.TaskContext#success} which preserves the - * legacy behaviour of calling {@link 
ClusterStateTaskListener#clusterStateProcessed} or {@link ClusterStateTaskListener#onFailure}. - *

    - * New implementations should use a dedicated listener rather than relying on this legacy behaviour. - */ - // TODO remove all remaining usages of this listener - @Deprecated - record LegacyClusterTaskResultActionListener(ClusterStateTaskListener task, ClusterState originalState) - implements - Consumer { - @Override - public void accept(ClusterState publishedState) { - task.clusterStateProcessed(originalState, publishedState); - } - } - /** * A task to be executed, along with callbacks for the executor to record the outcome of this task's execution. The executor must * call exactly one of these methods for every task in its batch. @@ -108,10 +91,7 @@ interface TaskContext { * method and must instead call {@link #success(Runnable, ClusterStateAckListener)}, passing the task itself as the {@code * clusterStateAckListener} argument. * - * @param onPublicationSuccess An action executed when (if?) the cluster state update succeeds. The task's {@link - * ClusterStateTaskListener#clusterStateProcessed} method is not called directly by the master - * service once the task execution has succeeded, but legacy implementations may supply a listener - * which calls this methods. + * @param onPublicationSuccess An action executed when (if?) the cluster state update succeeds. */ void success(Runnable onPublicationSuccess); @@ -122,10 +102,7 @@ interface TaskContext { * method and must instead call {@link #success(Consumer, ClusterStateAckListener)}, passing the task itself as the {@code * clusterStateAckListener} argument. * - * @param publishedStateConsumer A consumer of the cluster state that was ultimately published. The task's {@link - * ClusterStateTaskListener#clusterStateProcessed} method is not called directly by the master - * service once the task execution has succeeded, but legacy implementations may supply a listener - * which calls this methods. + * @param publishedStateConsumer A consumer of the cluster state that was ultimately published. *

    * The consumer should prefer not to use the published state for things like determining the result * of a task. The task may have been executed as part of a batch, and later tasks in the batch may @@ -143,10 +120,7 @@ interface TaskContext { * Note that some tasks implement {@link ClusterStateAckListener} and can listen for acks themselves. If so, you must pass the task * itself as the {@code clusterStateAckListener} argument. * - * @param onPublicationSuccess An action executed when (if?) the cluster state update succeeds. The task's {@link - * ClusterStateTaskListener#clusterStateProcessed} method is not called directly by the master - * service once the task execution has succeeded, but legacy implementations may supply a listener - * which calls this methods. + * @param onPublicationSuccess An action executed when (if?) the cluster state update succeeds. * * @param clusterStateAckListener A listener for acknowledgements from nodes. If the publication succeeds then this listener is * completed as nodes ack the state update. If the publication fails then the failure @@ -160,10 +134,7 @@ interface TaskContext { * Note that some tasks implement {@link ClusterStateAckListener} and can listen for acks themselves. If so, you must pass the task * itself as the {@code clusterStateAckListener} argument. * - * @param publishedStateConsumer A consumer of the cluster state that was ultimately published. The task's {@link - * ClusterStateTaskListener#clusterStateProcessed} method is not called directly by the master - * service once the task execution has succeeded, but legacy implementations may supply a listener - * which calls this methods. + * @param publishedStateConsumer A consumer of the cluster state that was ultimately published. *

    * The consumer should prefer not to use the published state for things like determining the result * of a task. The task may have been executed as part of a batch, and later tasks in the batch may diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java index 1e383a6da7df6..85305529b6a4d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.service.MasterService; public interface ClusterStateTaskListener { - /** * A callback for when task execution fails. May receive a {@link NotMasterException} if this node stopped being the master before this * task was executed or a {@link ProcessClusterEventTimeoutException} if the task timed out before it was executed. If the task fails @@ -28,19 +27,4 @@ public interface ClusterStateTaskListener { * implementations must do so themselves, typically using a more specific logger and at a less dramatic log level. */ void onFailure(Exception e); - - /** - * Called when the result of the {@link ClusterStateTaskExecutor#execute} method has been processed properly by all listeners. - * - * The {@param newState} parameter is the state that was ultimately published. This can lead to surprising behaviour if tasks are - * batched together: a later task in the batch may undo or overwrite the changes made by an earlier task. In general you should prefer - * to ignore the published state and instead handle the success of a publication via the listener that the executor passes to - * {@link ClusterStateTaskExecutor.TaskContext#success}. - * - * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the master service at {@code ERROR} - * level and otherwise ignored, except in tests where it raises an {@link AssertionError}. If log-and-ignore is the right behaviour then - * implementations must do so themselves, typically using a more specific logger and at a less dramatic log level. - */ - // TODO: replace all remaining usages of this method with dedicated listeners and then remove it. - default void clusterStateProcessed(ClusterState oldState, ClusterState newState) {} } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index 1b8986b45f490..ac31d87ee67ca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -8,9 +8,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; -import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; -import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; @@ -50,20 +47,15 @@ public ClusterStateUpdateTask(Priority priority, TimeValue timeout) { public abstract ClusterState execute(ClusterState currentState) throws Exception; /** - * A callback for when task execution fails. May receive a {@link NotMasterException} if this node stopped being the master before this - * task was executed or a {@link ProcessClusterEventTimeoutException} if the task timed out before it was executed. 
If the task fails - * during execution then this method receives the corresponding exception. If the task executes successfully but the resulting cluster - * state publication fails then this method receives a {@link FailedToCommitClusterStateException}. If publication fails then a new - * master is elected and the update might or might not take effect, depending on whether or not the newly-elected master accepted the - * published state that failed to be committed. - *

    - * Use {@link MasterService#isPublishFailureException} to detect the "expected" master failure cases if needed. - *

    - * Implementations of this callback should not throw exceptions: an exception thrown here is logged by the master service at {@code - * ERROR} level and otherwise ignored. If log-and-ignore is the right behaviour then implementations should do so themselves, typically - * using a more specific logger and at a less dramatic log level. + * Called when the result of the {@link #execute} method has been processed properly by all listeners. + * + * The {@param newState} parameter is the state that was ultimately published. + * + * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the master service at {@code ERROR} + * level and otherwise ignored, except in tests where it raises an {@link AssertionError}. If log-and-ignore is the right behaviour then + * implementations must do so themselves, typically using a more specific logger and at a less dramatic log level. */ - public abstract void onFailure(Exception e); + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) {} /** * If the cluster state update task wasn't processed by the provided timeout, call diff --git a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java index 033f6d1f8dd39..5b1d0ff22e201 100644 --- a/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/LocalMasterServiceTask.java @@ -25,11 +25,6 @@ public LocalMasterServiceTask(Priority priority) { protected void execute(ClusterState currentState) {} - @Override - public final void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } - protected void onPublicationComplete() {} public void submit(MasterService masterService, String source) { diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 200a2024a610b..21747049cb57a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -538,7 +538,6 @@ public int hashCode() { public record FailedShardUpdateTask(FailedShardEntry entry, ActionListener listener) implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { logger.log( @@ -548,11 +547,6 @@ public void onFailure(Exception e) { ); listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "should not be called"; - } } public void shardStarted( @@ -859,11 +853,6 @@ public void onFailure(Exception e) { listener.onFailure(e); } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "should not be called"; - } - @Override public String toString() { return "StartedShardUpdateTask{entry=" + entry + ", listener=" + listener + "}"; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java index 61cc9196c2a96..10121337a85ca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.coordination; import 
org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -38,11 +37,6 @@ public void onFailure(Exception e) { } } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } - @Override public String toString() { final StringBuilder stringBuilder = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index af59b6a065109..7cf929d6da87a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -32,11 +32,6 @@ public void onFailure(final Exception e) { logger.log(MasterService.isPublishFailureException(e) ? Level.DEBUG : Level.ERROR, "unexpected failure during [node-left]", e); } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } - @Override public String toString() { final StringBuilder stringBuilder = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index 1243d9bb26d19..827ab9d9d81d1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -211,16 +211,10 @@ public ClusterState execute(BatchExecutionContext batchExe private record AddBlocksToCloseTask(CloseIndexClusterStateUpdateRequest request, ActionListener listener) implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } private class CloseIndicesExecutor implements ClusterStateTaskExecutor { @@ -292,16 +286,10 @@ private record CloseIndicesTask( Map verifyResults, ActionListener listener ) implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } /** @@ -547,11 +535,6 @@ private record AddBlocksTask(AddIndexBlockClusterStateUpdateRequest request, Act public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } private static class FinalizeBlocksExecutor implements ClusterStateTaskExecutor { @@ -592,16 +575,10 @@ private record FinalizeBlocksTask( Map verifyResults, ActionListener listener ) implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } /** @@ -1243,10 +1220,5 @@ public void onAckTimeout() { public TimeValue ackTimeout() { return request.ackTimeout(); } - - @Override - public void 
clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 3dc021cea6daa..e1ff12280222c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -158,11 +158,6 @@ private abstract static class TemplateClusterStateUpdateTask implements ClusterS public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } @Inject diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 8ec1e210d9d20..1634c860cf547 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -136,11 +136,6 @@ public void onFailure(Exception e) { listener.onFailure(e); } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "should not be called"; - } - ClusterState execute(ClusterState currentState) { final Settings normalizedSettings = Settings.builder() .put(request.settings()) diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java index 91aeab6799da2..3d5ddb2e36d17 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java @@ -155,11 +155,6 @@ public static List getNamedWriteables() { */ abstract static class UpsertHealthMetadataTask implements ClusterStateTaskListener { - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "never called"; - } - @Override public void onFailure(@Nullable Exception e) { logger.error("failure during health metadata update", e); diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 5f66dbb2daa07..d2e671550a39a 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -146,11 +146,6 @@ abstract static class PipelineClusterStateUpdateTask implements ClusterStateTask public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "should not be called"; - } } public IngestService( diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index f6a617c5c8011..62b207e252b87 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -3381,11 +3381,6 @@ public void onFailure(Exception e) { listener.onFailure(e); } - @Override - public 
void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "never called"; - } - @Override public boolean equals(Object other) { if (this == other) { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 1dedcd06b72d6..2742b521ebdb5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.ClusterNode; @@ -1133,7 +1132,7 @@ public void testMasterStatsOnNoOpUpdate() { leader.submitUpdateTask("unchanged", cs -> { computeAdvancer.advanceTime(); return cs; - }, new ClusterStateTaskListener() { + }, new CoordinatorTestClusterStateUpdateTask() { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { notifyAdvancer.advanceTime(); @@ -1220,7 +1219,7 @@ protected List extraNamedWriteables() { leader.submitUpdateTask("update", cs -> { computeAdvancer.advanceTime(); return ClusterState.builder(cs).putCustom(customName, new DelayedCustom(contextAdvancer)).build(); - }, new ClusterStateTaskListener() { + }, new CoordinatorTestClusterStateUpdateTask() { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { notifyAdvancer.advanceTime(); @@ -1281,7 +1280,7 @@ public void testMasterStatsOnFailedUpdate() { leader.submitUpdateTask("update", cs -> { computeAdvancer.advanceTime(); return ClusterState.builder(cs).build(); - }, new ClusterStateTaskListener() { + }, new CoordinatorTestClusterStateUpdateTask() { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { fail("shouldn't have processed cluster state"); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java index e52a3726cc046..3e61fb7fe9855 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskListener; @@ -235,11 +234,5 @@ private static class ExpectSuccessTask implements ClusterStateTaskListener { public void onFailure(Exception e) { throw new AssertionError("should not be called", e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - // see parent method javadoc, we use dedicated listeners rather than calling this method - throw new AssertionError("should not be called"); - } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java 
b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index cd7aa8d8918ca..b9bfe2ff4e5ba 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -667,11 +667,6 @@ public void onFailure(Exception e) { throw new AssertionError("should not be called", e); } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - throw new AssertionError("should not be called"); - } - @Override public boolean equals(Object o) { if (this == o) { @@ -849,11 +844,6 @@ class Task implements ClusterStateTaskListener { this.expectedHeaderValue = expectedHeaderValue; } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - throw new AssertionError("should not complete task"); - } - @Override public void onFailure(Exception e) { assertThat(e, instanceOf(RuntimeException.class)); @@ -927,11 +917,6 @@ class Task implements ClusterStateTaskListener { this.publishListener = publishListener; } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - throw new AssertionError("should not complete task"); - } - @Override public void onFailure(Exception e) { publishListener.onFailure(e); @@ -1419,11 +1404,6 @@ class Task extends LatchAckListener implements ClusterStateTaskListener { public void onFailure(Exception e) { throw new AssertionError(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - fail(); - } } masterService.submitStateUpdateTask( @@ -1466,16 +1446,10 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) }); class Task implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { throw new AssertionError(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - fail(); - } } masterService.submitStateUpdateTask( @@ -1508,16 +1482,10 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) }); class Task implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { throw new AssertionError(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - fail(); - } } masterService.submitStateUpdateTask( @@ -1550,16 +1518,10 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) }); class Task implements ClusterStateTaskListener { - @Override public void onFailure(Exception e) { throw new AssertionError(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - fail(); - } } masterService.submitStateUpdateTask( @@ -1991,11 +1953,5 @@ private static class ExpectSuccessTask implements ClusterStateTaskListener { public void onFailure(Exception e) { throw new AssertionError("should not be called", e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - // see parent method javadoc, we use dedicated listeners rather than calling this method - throw new AssertionError("should not be called"); - } } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index 3f820e9772518..0f4df5fd9b442 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -1435,7 +1435,7 @@ AckCollector submitValue(final int key, final long value) { return submitUpdateTask( "new value [" + key + "=" + value + "]", cs -> setValue(cs, key, value), - new ClusterStateTaskListener() { + new CoordinatorTestClusterStateUpdateTask() { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { history.respond(eventId, value(oldState, key)); @@ -1460,7 +1460,7 @@ public void onFailure(Exception e) { void readValue(int key) { final int eventId = history.invoke(new Tuple<>(key, null)); - submitUpdateTask("read value", cs -> ClusterState.builder(cs).build(), new ClusterStateTaskListener() { + submitUpdateTask("read value", cs -> ClusterState.builder(cs).build(), new CoordinatorTestClusterStateUpdateTask() { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { history.respond(eventId, value(newState, key)); @@ -1478,7 +1478,7 @@ public void onFailure(Exception e) { AckCollector submitUpdateTask( String source, UnaryOperator clusterStateUpdate, - ClusterStateTaskListener taskListener + CoordinatorTestClusterStateUpdateTask taskListener ) { final AckCollector ackCollector = new AckCollector(); onNode(() -> { @@ -1956,4 +1956,8 @@ void clear() { } } + + public interface CoordinatorTestClusterStateUpdateTask extends ClusterStateTaskListener { + default void clusterStateProcessed(ClusterState oldState, ClusterState newState) {} + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index 5c81fde14bd26..b9356c71a212f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -49,11 +49,6 @@ public class StartBasicClusterTask implements ClusterStateTaskListener { this.clock = clock; } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "never called"; - } - public LicensesMetadata execute( LicensesMetadata currentLicensesMetadata, DiscoveryNodes discoveryNodes, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index bb4bb6adcb073..b6431ecd77c3a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -56,11 +56,6 @@ public class StartTrialClusterTask implements ClusterStateTaskListener { this.clock = clock; } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "never called"; - } - private LicensesMetadata execute( LicensesMetadata currentLicensesMetadata, DiscoveryNodes discoveryNodes, diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleClusterStateUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleClusterStateUpdateTask.java index 196e703540abd..193a4b110c596 100644 --- 
a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleClusterStateUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleClusterStateUpdateTask.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.ilm.Step; @@ -52,7 +53,6 @@ public final ClusterState execute(ClusterState currentState) throws Exception { protected abstract ClusterState doExecute(ClusterState currentState) throws Exception; - @Override public final void clusterStateProcessed(ClusterState oldState, ClusterState newState) { listener.onResponse(null); if (executed) { @@ -76,9 +76,9 @@ public final void addListener(ActionListener actionListener) { } /** - * This method is functionally the same as {@link ClusterStateTaskListener#clusterStateProcessed(ClusterState, ClusterState)} + * This method is functionally the same as {@link ClusterStateUpdateTask#clusterStateProcessed} * and implementations can override it as they would override {@code ClusterStateUpdateTask#clusterStateProcessed}. - * The only difference to {@code ClusterStateUpdateTask#clusterStateProcessed} is that if the {@link #execute(ClusterState)} + * The only difference to {@link ClusterStateUpdateTask#clusterStateProcessed} is that if the {@link #execute(ClusterState)} * implementation was a noop and returned the input cluster state, then this method will not be invoked. */ protected void onClusterStateProcessed(ClusterState newState) {} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index 68208dc808465..c1e1fdfe130ab 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -69,7 +69,7 @@ public ClusterState execute(BatchExecutionContext task.clusterStateProcessed(batchExecutionContext.initialState(), publishedState) ); } catch (Exception e) { taskContext.onFailure(e); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java index e70a99e47bd2f..80c8f84b63823 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/TransportRollupAction.java @@ -658,10 +658,5 @@ private abstract static class RollupClusterStateUpdateTask implements ClusterSta public void onFailure(Exception e) { listener.onFailure(e); } - - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - assert false : "not called"; - } } } From c541610fb59a3463b8e1bd10d5eccac5123ad4f6 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Thu, 18 Aug 2022 14:43:45 +0100 Subject: [PATCH 256/265] Upgrade OpenTelemetry API and remove workaround (#89438) Closes #89414. Remove the workaround from #89135 that addressed #89107, and instead upgrade the OpenTelemetry API, which contains a fix for the underlying issue. 
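For reference, the OpenTelemetry API surface this module depends on is small. The sketch below is illustrative only (the tracer and span names are made up, and it is not code from APMTracer); it shows the tracer/span/scope interaction that the upgraded opentelemetry-api and opentelemetry-context jars provide:

    import io.opentelemetry.api.GlobalOpenTelemetry;
    import io.opentelemetry.api.trace.Span;
    import io.opentelemetry.api.trace.Tracer;
    import io.opentelemetry.context.Scope;

    public class OtelApiSketch {
        public static void main(String[] args) {
            // Without an SDK or agent registered, GlobalOpenTelemetry hands back no-op
            // implementations, so this sketch runs (and records nothing) in isolation.
            Tracer tracer = GlobalOpenTelemetry.getTracer("example-instrumentation");
            Span span = tracer.spanBuilder("example-operation").startSpan();
            try (Scope ignored = span.makeCurrent()) {
                // Attach key/value metadata to the span while it is current on this thread.
                span.setAttribute("example.attribute", "value");
            } finally {
                span.end(); // spans must always be ended, even when the traced work fails
            }
        }
    }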
--- gradle/verification-metadata.xml | 18 ++++++++--------- modules/apm/build.gradle | 8 +++++--- .../elasticsearch/tracing/apm/APMTracer.java | 20 +++++++------------ 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index e6eae1a422579..ab4c1b120cfd6 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1254,19 +1254,19 @@ - - - + + + - - - + + + - - - + + + diff --git a/modules/apm/build.gradle b/modules/apm/build.gradle index 6bb1c544d096e..2b79dfe4b1572 100644 --- a/modules/apm/build.gradle +++ b/modules/apm/build.gradle @@ -12,10 +12,12 @@ esplugin { classname 'org.elasticsearch.tracing.apm.APM' } +def otelVersion = '1.17.0' + dependencies { - implementation "io.opentelemetry:opentelemetry-api:1.15.0" - implementation "io.opentelemetry:opentelemetry-context:1.15.0" - implementation "io.opentelemetry:opentelemetry-semconv:1.15.0-alpha" + implementation "io.opentelemetry:opentelemetry-api:${otelVersion}" + implementation "io.opentelemetry:opentelemetry-context:${otelVersion}" + implementation "io.opentelemetry:opentelemetry-semconv:${otelVersion}-alpha" runtimeOnly "co.elastic.apm:elastic-apm-agent:1.33.0" } diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java index a5d43bb8a6672..d1d7ce113b344 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java @@ -286,7 +286,7 @@ private void setSpanAttributes(ThreadContext threadContext, @Nullable Map> { @Override From 58ddca3b8180fd03c62919e765953e03b95638ff Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Thu, 18 Aug 2022 15:44:18 +0200 Subject: [PATCH 257/265] feature: support metrics for multi value fields (#88818) --- .../rest-api-spec/test/rollup/10_basic.yml | 27 +++++++++++++------ .../xpack/rollup/v2/RollupShardIndexer.java | 26 +++++++++--------- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml index fee9a0ed0ed08..6c062ce4f4507 100644 --- a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml +++ b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml @@ -44,6 +44,12 @@ setup: type: keyword values: type: integer + multi-counter: + type: long + time_series_metric: counter + multi-gauge: + type: integer + time_series_metric: gauge network: properties: tx: @@ -58,21 +64,21 @@ setup: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}, "created_at": "2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "multi-counter" : [10, 11, 12], "multi-gauge": [100, 200, 150], "network": {"tx": 2001818691, "rx": 802133794}, "created_at": 
"2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "multi-counter" : [21, 22, 23], "multi-gauge": [90, 91, 95], "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.41", "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' + - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.41", "multi-counter" : [1, 5, 10], "multi-gauge": [103, 110, 109], "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' + - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "multi-counter" : [101, 102, 105], "multi-gauge": [100, 100, 100], "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "multi-counter" : [7, 11, 44], "multi-gauge": [100, 100, 102], "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "network": {"tx": 1434577921, "rx": 
530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "multi-counter" : [0, 0, 1], "multi-gauge": [101, 102, 102], "network": {"tx": 1434577921, "rx": 530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [4, 5, 2]}}}' + - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "multi-counter" : [1000, 1001, 1002], "multi-gauge": [99, 100, 110], "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [4, 5, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' + - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "multi-counter" : [76, 77, 78], "multi-gauge": [95, 98, 100], "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' - do: indices.put_settings: @@ -106,6 +112,11 @@ setup: - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 21 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 90 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 200 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 726 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 6 } - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 2001818691 } - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 2005177954 } - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 2 } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/RollupShardIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/RollupShardIndexer.java index 04a9c054b0332..c439d38eae568 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/RollupShardIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/v2/RollupShardIndexer.java @@ -396,15 +396,15 @@ public RollupBucketBuilder resetTimestamp(long timestamp) { } public 
void collect(final String field, int docValueCount, final Function fieldValues) { - final Object[] value = fieldValues.apply(docValueCount); + final Object[] values = fieldValues.apply(docValueCount); if (metricFieldProducers.containsKey(field)) { // TODO: missing support for array metrics - collectMetric(field, value[0]); + collectMetric(field, values); } else if (labelFieldProducers.containsKey(field)) { - if (value.length == 1) { - collectLabel(field, value[0]); + if (values.length == 1) { + collectLabel(field, values[0]); } else { - collectLabel(field, value); + collectLabel(field, values); } } else { throw new IllegalArgumentException( @@ -423,13 +423,15 @@ private void collectLabel(final String field, final Object value) { labelFieldProducers.get(field).collect(value); } - private void collectMetric(final String field, final Object value) { - if (value instanceof Number number) { - metricFieldProducers.get(field).collect(number); - } else { - throw new IllegalArgumentException( - "Expected numeric value for field '" + field + "' but got non numeric value: '" + value + "'" - ); + private void collectMetric(final String field, final Object[] values) { + for (var value : values) { + if (value instanceof Number number) { + metricFieldProducers.get(field).collect(number); + } else { + throw new IllegalArgumentException( + "Expected numeric value for field '" + field + "' but got non numeric value: '" + value + "'" + ); + } } } From 22e1150dd6b6b2365666cc6a93b5fa58fb2ac581 Mon Sep 17 00:00:00 2001 From: mushaoqiong Date: Thu, 18 Aug 2022 22:31:16 +0800 Subject: [PATCH 258/265] Reuse Info in lifecycle step (#89419) We have a `SingleMessageFieldInfo` defined in `org/elasticsearch/xpack/core/ilm/step/info` to provide single-message info for `AsyncWaitStep` and `ClusterStateWaitStep`. But there are still some steps, like `CheckNotDataStreamWriteIndexStep`, that define their own single-message info. This PR removes the duplicated info definitions from these steps and uses `SingleMessageFieldInfo` instead. 
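For context, the shared info object is essentially a ToXContentObject wrapping one message field, which is what each of the removed per-step Info classes below reimplemented. A simplified sketch of that shape (class name illustrative, equals/hashCode omitted for brevity):

    import org.elasticsearch.xcontent.ParseField;
    import org.elasticsearch.xcontent.ToXContentObject;
    import org.elasticsearch.xcontent.XContentBuilder;

    import java.io.IOException;

    // Sketch only: a single-message step info object, rendered as {"message": "..."}.
    final class MessageInfoSketch implements ToXContentObject {
        static final ParseField MESSAGE = new ParseField("message");

        private final String message;

        MessageInfoSketch(String message) {
            this.message = message;
        }

        String getMessage() {
            return message;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(MESSAGE.getPreferredName(), message);
            builder.endObject();
            return builder;
        }
    }

Steps then report an unmet wait condition as new Result(false, new SingleMessageFieldInfo(errorMessage)), as the diffs below show.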
--- docs/changelog/89419.yaml | 5 ++ .../ilm/CheckNotDataStreamWriteIndexStep.java | 50 +-------------- .../core/ilm/WaitForActiveShardsStep.java | 43 ++----------- .../xpack/core/ilm/WaitForIndexColorStep.java | 61 +++---------------- .../core/ilm/WaitForNoFollowersStep.java | 48 ++------------- .../CheckNoDataStreamWriteIndexStepTests.java | 3 +- .../core/ilm/WaitForIndexColorStepTests.java | 13 ++-- 7 files changed, 34 insertions(+), 189 deletions(-) create mode 100644 docs/changelog/89419.yaml diff --git a/docs/changelog/89419.yaml b/docs/changelog/89419.yaml new file mode 100644 index 0000000000000..95cb2be603b44 --- /dev/null +++ b/docs/changelog/89419.yaml @@ -0,0 +1,5 @@ +pr: 89419 +summary: Reuse Info in lifecycle step +area: ILM+SLM +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java index e790aff389510..c8e90427e6feb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java @@ -13,13 +13,9 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.index.Index; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; -import java.io.IOException; import java.util.Locale; -import java.util.Objects; /** * Some actions cannot be executed on a data stream's write index (eg. `searchable-snapshot`). 
This step checks if the managed index is @@ -57,7 +53,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { ); // Index must have been since deleted logger.debug(errorMessage); - return new Result(false, new Info(errorMessage)); + return new Result(false, new SingleMessageFieldInfo(errorMessage)); } String policyName = indexMetadata.getLifecyclePolicyName(); @@ -77,50 +73,10 @@ public Result isConditionMet(Index index, ClusterState clusterState) { policyName ); logger.debug(errorMessage); - return new Result(false, new Info(errorMessage)); + return new Result(false, new SingleMessageFieldInfo(errorMessage)); } } return new Result(true, null); } - - static final class Info implements ToXContentObject { - - private final String message; - - static final ParseField MESSAGE = new ParseField("message"); - - Info(String message) { - this.message = message; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(MESSAGE.getPreferredName(), message); - builder.endObject(); - return builder; - } - - public String getMessage() { - return message; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Info info = (Info) o; - return Objects.equals(message, info.message); - } - - @Override - public int hashCode() { - return Objects.hash(message); - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java index b4a2af3bda603..8af173eda920b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; import java.io.IOException; import java.util.List; @@ -61,7 +62,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { ); // Index must have been since deleted logger.debug(errorMessage); - return new Result(false, new Info(errorMessage)); + return new Result(false, new SingleMessageFieldInfo(errorMessage)); } boolean indexingComplete = LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.get(originalIndexMeta.getSettings()); @@ -73,7 +74,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { WaitForActiveShardsStep.NAME ); logger.trace(message); - return new Result(true, new Info(message)); + return new Result(true, new SingleMessageFieldInfo(message)); } IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(index.getName()); @@ -149,7 +150,7 @@ private static Result getErrorResultOnNullMetadata(StepKey key, Index originalIn // Index must have been since deleted logger.debug(errorMessage); - return new Result(false, new Info(errorMessage)); + return new Result(false, new SingleMessageFieldInfo(errorMessage)); } static final class ActiveShardsInfo implements ToXContentObject { @@ -211,40 +212,4 @@ public int hashCode() { return Objects.hash(currentActiveShardsCount, targetActiveShardsCount, enoughShardsActive, message); } } - - static final class Info implements ToXContentObject { - - 
private final String message; - - static final ParseField MESSAGE = new ParseField("message"); - - Info(String message) { - this.message = message; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(MESSAGE.getPreferredName(), message); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Info info = (Info) o; - return Objects.equals(message, info.message); - } - - @Override - public int hashCode() { - return Objects.hash(message); - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java index 59982b4d7931d..7a464390784d9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java @@ -17,11 +17,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; -import java.io.IOException; import java.util.Locale; import java.util.Objects; import java.util.function.BiFunction; @@ -100,7 +97,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { indexName ); logger.debug(errorMessage); - return new Result(false, new Info(errorMessage)); + return new Result(false, new SingleMessageFieldInfo(errorMessage)); } IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(indexMetadata.getIndex()); @@ -119,79 +116,39 @@ public boolean isRetryable() { private static Result waitForRed(IndexRoutingTable indexRoutingTable) { if (indexRoutingTable == null) { - return new Result(true, new Info("index is red")); + return new Result(true, new SingleMessageFieldInfo("index is red")); } - return new Result(false, new Info("index is not red")); + return new Result(false, new SingleMessageFieldInfo("index is not red")); } private static Result waitForYellow(IndexRoutingTable indexRoutingTable) { if (indexRoutingTable == null) { - return new Result(false, new Info("index is red; no indexRoutingTable")); + return new Result(false, new SingleMessageFieldInfo("index is red; no indexRoutingTable")); } boolean indexIsAtLeastYellow = indexRoutingTable.allPrimaryShardsActive(); if (indexIsAtLeastYellow) { return new Result(true, null); } else { - return new Result(false, new Info("index is red; not all primary shards are active")); + return new Result(false, new SingleMessageFieldInfo("index is red; not all primary shards are active")); } } private static Result waitForGreen(IndexRoutingTable indexRoutingTable) { if (indexRoutingTable == null) { - return new Result(false, new Info("index is red; no indexRoutingTable")); + return new Result(false, new SingleMessageFieldInfo("index is red; no indexRoutingTable")); } if (indexRoutingTable.allPrimaryShardsActive()) { for (int i = 0; i < indexRoutingTable.size(); i++) { boolean replicaIndexIsGreen = indexRoutingTable.shard(i).replicaShards().stream().allMatch(ShardRouting::active); if 
(replicaIndexIsGreen == false) { - return new Result(false, new Info("index is yellow; not all replica shards are active")); + return new Result(false, new SingleMessageFieldInfo("index is yellow; not all replica shards are active")); } } return new Result(true, null); } - return new Result(false, new Info("index is not green; not all shards are active")); - } - - static final class Info implements ToXContentObject { - - static final ParseField MESSAGE_FIELD = new ParseField("message"); - - private final String message; - - Info(String message) { - this.message = message; - } - - String getMessage() { - return message; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(MESSAGE_FIELD.getPreferredName(), message); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (o == null) { - return false; - } - if (getClass() != o.getClass()) { - return false; - } - Info info = (Info) o; - return Objects.equals(getMessage(), info.getMessage()); - } - - @Override - public int hashCode() { - return Objects.hash(getMessage()); - } + return new Result(false, new SingleMessageFieldInfo("index is not green; not all shards are active")); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStep.java index dc0eab829add2..79d55243b58a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStep.java @@ -17,14 +17,10 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; -import java.io.IOException; import java.util.Arrays; import java.util.Collection; -import java.util.Objects; import java.util.Optional; /** @@ -39,6 +35,8 @@ public class WaitForNoFollowersStep extends AsyncWaitStep { static final String NAME = "wait-for-shard-history-leases"; static final String CCR_LEASE_KEY = "ccr"; + private static final String WAIT_MESSAGE = "this index is a leader index; waiting for all following indices to cease " + + "following before proceeding"; WaitForNoFollowersStep(StepKey key, StepKey nextStepKey, Client client) { super(key, nextStepKey, client); @@ -73,48 +71,10 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, .anyMatch(lease -> lease.isPresent() && lease.get().anyMatch(l -> CCR_LEASE_KEY.equals(l.source()))); if (isCurrentlyLeaderIndex) { - listener.onResponse(false, new Info()); + listener.onResponse(false, new SingleMessageFieldInfo(WAIT_MESSAGE)); } else { listener.onResponse(true, null); } }, listener::onFailure)); } - - static final class Info implements ToXContentObject { - - static final ParseField MESSAGE_FIELD = new ParseField("message"); - - private static final String message = "this index is a leader index; waiting for all following indices to cease " - + "following before proceeding"; - - Info() {} - - static String getMessage() { - return message; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params 
params) throws IOException { - builder.startObject(); - builder.field(MESSAGE_FIELD.getPreferredName(), message); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - return true; - } - - @Override - public int hashCode() { - return Objects.hash(getMessage()); - } - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java index e7fb3b40f30e3..07a9d975a1a98 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; import java.util.List; @@ -80,7 +81,7 @@ public void testStepIncompleteIfIndexIsTheDataStreamWriteIndex() { ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - CheckNotDataStreamWriteIndexStep.Info info = (CheckNotDataStreamWriteIndexStep.Info) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); assertThat( info.getMessage(), is( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java index 9259c63f243e9..89d2381ca1d74 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.ilm.Step.StepKey; +import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; import java.util.function.BiFunction; @@ -125,7 +126,7 @@ public void testConditionNotMetForGreen() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - WaitForIndexColorStep.Info info = (WaitForIndexColorStep.Info) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is not green; not all shards are active")); } @@ -145,7 +146,7 @@ public void testConditionNotMetNoIndexRoutingTable() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - WaitForIndexColorStep.Info info = (WaitForIndexColorStep.Info) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) 
result.getInfomationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is red; no indexRoutingTable")); } @@ -199,7 +200,7 @@ public void testConditionNotMetForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - WaitForIndexColorStep.Info info = (WaitForIndexColorStep.Info) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is red; not all primary shards are active")); } @@ -219,7 +220,7 @@ public void testConditionNotMetNoIndexRoutingTableForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - WaitForIndexColorStep.Info info = (WaitForIndexColorStep.Info) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is red; no indexRoutingTable")); } @@ -249,7 +250,7 @@ public void testStepReturnsFalseIfTargetIndexIsMissing() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN, indexPrefix); ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - WaitForIndexColorStep.Info info = (WaitForIndexColorStep.Info) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); String targetIndex = indexPrefix + originalIndex.getIndex().getName(); assertThat( info.getMessage(), @@ -309,7 +310,7 @@ public void testStepWaitsForTargetIndexHealthWhenPrefixConfigured() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterTargetInitializing); assertThat(result.isComplete(), is(false)); - WaitForIndexColorStep.Info info = (WaitForIndexColorStep.Info) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); assertThat(info.getMessage(), is("index is not green; not all shards are active")); } From 058ea4594a93154807829dc89bc776e0bf5ac41b Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Thu, 18 Aug 2022 07:33:05 -0700 Subject: [PATCH 259/265] Add source fallback support for date and date_nanos mapped types (#89440) This change adds source fallback support for date and date_nanos by using the existing SourceValueFetcherSortedNumericIndexFieldData to emulate doc values. 
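Concretely, when a date or date_nanos field has doc values disabled, scripts now fall back to fetching the raw value from _source and turning it into the same long that doc values would have held. The sketch below is a simplified, framework-free illustration of that conversion for a millisecond-resolution field; the real fetcher instead routes every value (numbers included) through the field's configured date parser and also covers null_value and nanosecond resolution:

    import java.time.Instant;

    public class DateSourceFallbackSketch {
        // Convert a raw _source value (epoch millis as a number, or an ISO-8601 string)
        // into the epoch-millis long that numeric doc values would have stored.
        static long toEpochMillis(Object sourceValue) {
            if (sourceValue instanceof Number number) {
                return number.longValue();
            }
            return Instant.parse(sourceValue.toString()).toEpochMilli();
        }

        public static void main(String[] args) {
            System.out.println(toEpochMillis("2017-01-01T12:11:12Z")); // 1483272672000, the millis the yaml tests below expect
            System.out.println(toEpochMillis(1483272672000L));         // numeric values pass straight through
        }
    }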
--- docs/changelog/89440.yaml | 5 + .../test/painless/50_script_doc_values.yml | 248 ++++++++++++++++++ .../index/mapper/DateFieldMapper.java | 43 ++- 3 files changed, 294 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/89440.yaml diff --git a/docs/changelog/89440.yaml b/docs/changelog/89440.yaml new file mode 100644 index 0000000000000..816c43467375a --- /dev/null +++ b/docs/changelog/89440.yaml @@ -0,0 +1,5 @@ +pr: 89440 +summary: Add source fallback support for date and `date_nanos` mapped types +area: Mapping +type: enhancement +issues: [] diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 979f0a1cdf7df..053a12ae3ba72 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -14,8 +14,14 @@ setup: doc_values: false date: type: date + date_no_doc_values: + type: date + doc_values: false nanos: type: date_nanos + nanos_no_doc_values: + type: date_nanos + doc_values: false geo_point: type: geo_point geo_point_no_doc_values: @@ -93,7 +99,9 @@ setup: boolean: true boolean_no_doc_values: true date: 2017-01-01T12:11:12 + date_no_doc_values: 2017-01-01T12:11:12 nanos: 2015-01-01T12:10:30.123456789Z + nanos_no_doc_values: 2015-01-01T12:10:30.123456789Z geo_point: 41.12,-71.34 geo_point_no_doc_values: 41.12,-71.34 ip: 192.168.0.19 @@ -136,7 +144,9 @@ setup: boolean_no_doc_values: [true, false, true] ip: ["10.1.2.3", "2001:db8::2:1"] date: [2017-01-01T12:11:12, 2018-01-01T12:11:12] + date_no_doc_values: [2017-01-01T12:11:12, 2018-01-01T12:11:12] nanos: [2015-01-01T12:10:30.123456789Z, 2015-01-01T12:10:30.987654321Z] + nanos_no_doc_values: [2015-01-01T12:10:30.123456789Z, 2015-01-01T12:10:30.987654321Z] geo_point: [[-71.34,41.12],[60.32,21.25]] geo_point_no_doc_values: [[60.32,21.25],[-71.34,41.12]] keyword: ["one string", "another string"] @@ -692,6 +702,244 @@ setup: source: "List times = new ArrayList(); for (ZonedDateTime zdt : field('nanos')) times.add(zdt); times" - match: { hits.hits.0.fields.field: ["2015-01-01T12:10:30.123456789Z", "2015-01-01T12:10:30.987654321Z"] } +--- +"date_no_doc_values": + - skip: + features: "warnings" + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc.date_no_doc_values.get(0)" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "doc.date_no_doc_values.value" + - match: { error.failed_shards.0.reason.caused_by.type: "illegal_argument_exception" } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "field('date_no_doc_values').get(null)" + - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('date_no_doc_values', null)" + - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + 
query: { term: { _id: 1 } } + script_fields: + field: + script: + source: "field('date_no_doc_values').get(null).getMillis()" + - match: { hits.hits.0.fields.field.0: 1483272672000 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: 1 } } + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('date_no_doc_values', null).getMillis()" + - match: { hits.hits.0.fields.field.0: 1483272672000 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: 1 } } + script_fields: + field: + script: + source: "field('date_no_doc_values').get(null).millis" + - match: { hits.hits.0.fields.field.0: 1483272672000 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: 1 } } + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('date_no_doc_values', null).millis" + - match: { hits.hits.0.fields.field.0: 1483272672000 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "field('date_no_doc_values').get(ZonedDateTime.parse('2018-01-01T12:11:12.000Z'))" + - match: { hits.hits.0.fields.field.0: '2018-01-01T12:11:12.000Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('date_no_doc_values', ZonedDateTime.parse('2018-01-01T12:11:12.000Z'))" + - match: { hits.hits.0.fields.field.0: '2018-01-01T12:11:12.000Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "field('nanos_no_doc_values').get(null)" + - match: { hits.hits.0.fields.field.0: '2015-01-01T12:10:30.123456789Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('nanos_no_doc_values', null)" + - match: { hits.hits.0.fields.field.0: '2015-01-01T12:10:30.123456789Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "field('nanos_no_doc_values').get(ZonedDateTime.parse('2016-01-01T12:10:30.123Z'))" + - match: { hits.hits.0.fields.field.0: '2016-01-01T12:10:30.123Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('nanos_no_doc_values', ZonedDateTime.parse('2016-01-01T12:10:30.123Z'))" + - match: { hits.hits.0.fields.field.0: '2016-01-01T12:10:30.123Z' } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "field('nanos_no_doc_values').get(null).getNano()" + - match: { hits.hits.0.fields.field.0: 123456789 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "1" } } + script_fields: + field: + script: + source: "/* avoid yaml stash */ $('nanos_no_doc_values', null).getNano()" + - match: { hits.hits.0.fields.field.0: 123456789 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "2" } } + script_fields: + field: + script: + source: "field('nanos_no_doc_values').get(ZonedDateTime.parse('2016-01-01T12:10:30.123Z')).getNano()" + - match: { hits.hits.0.fields.field.0: 123000000 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "3" } } + script_fields: + field: + 
script: + source: "field('date_no_doc_values').get(1, null)" + - match: { hits.hits.0.fields.field.0: "2018-01-01T12:11:12.000Z" } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "3" } } + script_fields: + field: + script: + source: "field('nanos_no_doc_values').get(1, null)" + - match: { hits.hits.0.fields.field.0: "2015-01-01T12:10:30.987654321Z" } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "3" } } + script_fields: + field: + script: + source: "List times = new ArrayList(); for (ZonedDateTime zdt : field('date_no_doc_values')) times.add(zdt); times" + - match: { hits.hits.0.fields.field: ["2017-01-01T12:11:12.000Z", "2018-01-01T12:11:12.000Z"] } + + - do: + search: + rest_total_hits_as_int: true + body: + query: { term: { _id: "3" } } + script_fields: + field: + script: + source: "List times = new ArrayList(); for (ZonedDateTime zdt : field('nanos_no_doc_values')) times.add(zdt); times" + - match: { hits.hits.0.fields.field: ["2015-01-01T12:10:30.123456789Z", "2015-01-01T12:10:30.987654321Z"] } + --- "geo_point": - do: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index c3728b8205025..f61cca78ec2ba 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.SourceValueFetcherSortedNumericIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.DateRangeIncludingNowQuery; import org.elasticsearch.index.query.QueryRewriteContext; @@ -64,6 +65,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; @@ -519,6 +521,17 @@ public String parseSourceValue(Object value) { }; } + // returns a Long to support source fallback which emulates numeric doc values for dates + private SourceValueFetcher sourceValueFetcher(Set sourcePaths) { + return new SourceValueFetcher(sourcePaths, nullValue) { + @Override + public Long parseSourceValue(Object value) { + String date = value instanceof Number ? 
NUMBER_FORMAT.format(value) : value.toString(); + return parse(date); + } + }; + } + private String format(long timestamp, DateFormatter formatter) { ZonedDateTime dateTime = resolution().toInstant(timestamp).atZone(ZoneOffset.UTC); return formatter.format(dateTime); @@ -750,8 +763,34 @@ public Function pointReaderIfPossible() { @Override public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { - failIfNoDocValues(); - return new SortedNumericIndexFieldData.Builder(name(), resolution.numericType(), resolution.getDefaultToScriptFieldFactory()); + FielddataOperation operation = fieldDataContext.fielddataOperation(); + + if (operation == FielddataOperation.SEARCH) { + failIfNoDocValues(); + } + + if ((operation == FielddataOperation.SEARCH || operation == FielddataOperation.SCRIPT) && hasDocValues()) { + return new SortedNumericIndexFieldData.Builder( + name(), + resolution.numericType(), + resolution.getDefaultToScriptFieldFactory() + ); + } + + if (operation == FielddataOperation.SCRIPT) { + SearchLookup searchLookup = fieldDataContext.lookupSupplier().get(); + Set sourcePaths = fieldDataContext.sourcePathsLookup().apply(name()); + + return new SourceValueFetcherSortedNumericIndexFieldData.Builder( + name(), + resolution.numericType().getValuesSourceType(), + sourceValueFetcher(sourcePaths), + searchLookup.source(), + resolution.getDefaultToScriptFieldFactory() + ); + } + + throw new IllegalStateException("unknown field data operation [" + operation.name() + "]"); } @Override From 9f29241e8d60e8dadc7e7f268f49e02aa4c6bb4d Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Thu, 18 Aug 2022 15:39:26 +0100 Subject: [PATCH 260/265] [ML] Performance improvements related to ECS Grok pattern usage (#89424) Swap out TOMCAT_DATESTAMP for TOMCATLEGACY_DATESTAMP when ECS compatibility is set to v1 See comments on #89386 Relates to #77065 --- .../TimestampFormatFinder.java | 24 +- .../TimestampFormatFinderTests.java | 830 ++++++++++-------- 2 files changed, 507 insertions(+), 347 deletions(-) diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java index 93d7db489f905..86ca7b81c4263 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinder.java @@ -295,6 +295,9 @@ public final class TimestampFormatFinder { */ static final List ORDERED_CANDIDATE_FORMATS_ECS_V1; static { + // From libs/grok/src/main/resources/patterns/ecs-v1/java + // TOMCAT_DATESTAMP (?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP})|(?:%{TOMCATLEGACY_DATESTAMP}) + List items = new ArrayList<>(); // CATALINA8_DATESTAMP %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} // Where SECOND is defined as (?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) @@ -325,11 +328,30 @@ public final class TimestampFormatFinder { 3 ) ); + // From libs/grok/src/main/resources/patterns/ecs-v1/java + // TOMCATLEGACY_DATESTAMP %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND}(?: %{ISO8601_TIMEZONE})? 
+ // This is effectively a renaming of TOMCAT_DATESTAMP defined in libs/grok/src/main/resources/patterns/legacy/java + items.add( + new CandidateTimestampFormat( + example -> CandidateTimestampFormat.iso8601LikeFormatFromExample(example, " ", " "), + "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", + "\\b20\\d{2}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9} (?:Z|[+-]%{HOUR}%{MINUTE})\\b", + "TOMCATLEGACY_DATESTAMP", + "1111 11 11 11 11 11 111", + 0, + 13 + ) + ); + items.addAll( ORDERED_CANDIDATE_FORMATS.stream() - .filter(p -> "CATALINA_DATESTAMP".equals(p.outputGrokPatternName) == false) + .filter( + p -> (("CATALINA_DATESTAMP".equals(p.outputGrokPatternName) == false) + && ("TOMCAT_DATESTAMP".equals(p.outputGrokPatternName) == false)) + ) .collect(Collectors.toList()) ); + ORDERED_CANDIDATE_FORMATS_ECS_V1 = Collections.unmodifiableList(items); } diff --git a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java index 31929e38cdbdb..dd8bd7f5cc172 100644 --- a/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/TimestampFormatFinderTests.java @@ -848,375 +848,491 @@ public void testFindFormatGivenNoMatch() { public void testFindFormatGivenOnlyIso8601() { - validateTimestampMatch( - "2018-05-15T16:14:56,374Z", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896374L - ); - validateTimestampMatch( - "2018-05-15T17:14:56,374+0100", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896374L - ); - validateTimestampMatch( - "2018-05-15T17:14:56,374+01:00", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896374L - ); - validateTimestampMatch( - "2018-05-15T17:14:56,374", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896374L - ); + Consumer testFindFormatGivenOnlyIso8601AndEcsCompatibility = (ecsCompatibility) -> { + validateTimestampMatch( + "2018-05-15T16:14:56,374Z", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14:56,374+0100", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14:56,374+01:00", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14:56,374", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896374L, + ecsCompatibility + ); - validateTimestampMatch( - "2018-05-15T16:14:56Z", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896000L - ); - validateTimestampMatch( - "2018-05-15T17:14:56+0100", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896000L - ); - validateTimestampMatch( - "2018-05-15T17:14:56+01:00", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896000L - ); - 
validateTimestampMatch( - "2018-05-15T17:14:56", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400896000L - ); + validateTimestampMatch( + "2018-05-15T16:14:56Z", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14:56+0100", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14:56+01:00", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14:56", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "2018-05-15T16:14Z", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400840000L - ); - validateTimestampMatch( - "2018-05-15T17:14+0100", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400840000L - ); - validateTimestampMatch( - "2018-05-15T17:14+01:00", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400840000L - ); - validateTimestampMatch( - "2018-05-15T17:14", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "ISO8601", - 1526400840000L - ); + validateTimestampMatch( + "2018-05-15T16:14Z", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400840000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14+0100", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400840000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14+01:00", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400840000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15T17:14", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", + 1526400840000L, + ecsCompatibility + ); - // TIMESTAMP_ISO8601 doesn't match ISO8601 if it's only a date with no time - validateTimestampMatch("2018-05-15", "CUSTOM_TIMESTAMP", "\\b\\d{4}-\\d{2}-\\d{2}\\b", "ISO8601", 1526338800000L); + // TIMESTAMP_ISO8601 doesn't match ISO8601 if it's only a date with no time + validateTimestampMatch( + "2018-05-15", + "CUSTOM_TIMESTAMP", + "\\b\\d{4}-\\d{2}-\\d{2}\\b", + "ISO8601", + 1526338800000L, + ecsCompatibility + ); - validateTimestampMatch( - "2018-05-15 16:14:56,374Z", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ss,SSSXX", - 1526400896374L - ); - validateTimestampMatch( - "2018-05-15 17:14:56,374+0100", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ss,SSSXX", - 1526400896374L - ); - validateTimestampMatch( - "2018-05-15 17:14:56,374+01:00", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ss,SSSXXX", - 1526400896374L - ); - validateTimestampMatch( - "2018-05-15 17:14:56,374", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ss,SSS", - 1526400896374L - ); + validateTimestampMatch( + "2018-05-15 16:14:56,374Z", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXX", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + 
"2018-05-15 17:14:56,374+0100", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXX", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14:56,374+01:00", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXXX", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14:56,374", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSS", + 1526400896374L, + ecsCompatibility + ); - validateTimestampMatch( - "2018-05-15 16:14:56Z", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ssXX", - 1526400896000L - ); - validateTimestampMatch( - "2018-05-15 17:14:56+0100", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ssXX", - 1526400896000L - ); - validateTimestampMatch( - "2018-05-15 17:14:56+01:00", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ssXXX", - 1526400896000L - ); - validateTimestampMatch( - "2018-05-15 17:14:56", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm:ss", - 1526400896000L - ); + validateTimestampMatch( + "2018-05-15 16:14:56Z", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXX", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14:56+0100", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXX", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14:56+01:00", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXXX", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14:56", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss", + 1526400896000L, + ecsCompatibility + ); + + validateTimestampMatch( + "2018-05-15 16:14Z", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXX", + 1526400840000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14+0100", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXX", + 1526400840000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14+01:00", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXXX", + 1526400840000L, + ecsCompatibility + ); + validateTimestampMatch( + "2018-05-15 17:14", + "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm", + 1526400840000L, + ecsCompatibility + ); + }; + + ecsCompatibilityModes.forEach(testFindFormatGivenOnlyIso8601AndEcsCompatibility); - validateTimestampMatch( - "2018-05-15 16:14Z", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mmXX", - 1526400840000L - ); - validateTimestampMatch( - "2018-05-15 17:14+0100", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mmXX", - 1526400840000L - ); - validateTimestampMatch( - "2018-05-15 17:14+01:00", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mmXXX", - 1526400840000L - ); - validateTimestampMatch( - "2018-05-15 17:14", - "TIMESTAMP_ISO8601", - "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", - "yyyy-MM-dd HH:mm", - 1526400840000L - ); } public void 
testFindFormatGivenOnlyKnownTimestampFormat() { // Note: some of the time formats give millisecond accuracy, some second accuracy and some minute accuracy - validateTimestampMatch( - "2018-05-15 17:14:56,374 +0100", - "TOMCAT_DATESTAMP", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", - "yyyy-MM-dd HH:mm:ss,SSS XX", - 1526400896374L - ); + Consumer testFindFormatGivenOnlyKnownTimestampFormatAndEcsCompatibility = (ecsCompatibility) -> { + validateTimestampMatch( + "2018-05-15 17:14:56,374 +0100", + "TOMCAT_DATESTAMP", + "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", + "yyyy-MM-dd HH:mm:ss,SSS XX", + 1526400896374L, + ecsCompatibility + ); - validateTimestampMatch( - "Tue May 15 18 16:14:56 UTC", - "DATESTAMP_RFC822", - "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{1,2} \\d{2} \\d{2}:\\d{2}:\\d{2}\\b", - Arrays.asList("EEE MMM dd yy HH:mm:ss zzz", "EEE MMM d yy HH:mm:ss zzz"), - 1526400896000L - ); + validateTimestampMatch( + "Tue May 15 18 16:14:56 UTC", + "DATESTAMP_RFC822", + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{1,2} \\d{2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("EEE MMM dd yy HH:mm:ss zzz", "EEE MMM d yy HH:mm:ss zzz"), + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "Tue, 15 May 2018 17:14:56 +01:00", - "DATESTAMP_RFC2822", - "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", - "EEE, dd MMM yyyy HH:mm:ss XXX", - 1526400896000L - ); - validateTimestampMatch( - "Tue, 15 May 2018 17:14:56 +0100", - "DATESTAMP_RFC2822", - "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", - "EEE, dd MMM yyyy HH:mm:ss XX", - 1526400896000L - ); + validateTimestampMatch( + "Tue, 15 May 2018 17:14:56 +01:00", + "DATESTAMP_RFC2822", + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + "EEE, dd MMM yyyy HH:mm:ss XXX", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "Tue, 15 May 2018 17:14:56 +0100", + "DATESTAMP_RFC2822", + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + "EEE, dd MMM yyyy HH:mm:ss XX", + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "Tue May 15 16:14:56 UTC 2018", - "DATESTAMP_OTHER", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", - Arrays.asList("EEE MMM dd HH:mm:ss zzz yyyy", "EEE MMM d HH:mm:ss zzz yyyy"), - 1526400896000L - ); + validateTimestampMatch( + "Tue May 15 16:14:56 UTC 2018", + "DATESTAMP_OTHER", + "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("EEE MMM dd HH:mm:ss zzz yyyy", "EEE MMM d HH:mm:ss zzz yyyy"), + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch("20180515171456", "DATESTAMP_EVENTLOG", "\\b\\d{14}\\b", "yyyyMMddHHmmss", 1526400896000L); + validateTimestampMatch( + "20180515171456", + "DATESTAMP_EVENTLOG", + "\\b\\d{14}\\b", + "yyyyMMddHHmmss", + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "Tue May 15 17:14:56 2018", - "HTTPDERROR_DATE", - "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", - "EEE MMM dd HH:mm:ss yyyy", - 1526400896000L - ); + validateTimestampMatch( + "Tue May 15 17:14:56 2018", + "HTTPDERROR_DATE", + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", + "EEE MMM dd HH:mm:ss yyyy", + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "May 15 17:14:56.725", - "SYSLOGTIMESTAMP", - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", - Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), - 1526400896725L - ); - 
validateTimestampMatch( - "May 15 17:14:56", - "SYSLOGTIMESTAMP", - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss", "MMM d HH:mm:ss"), - 1526400896000L - ); + validateTimestampMatch( + "May 15 17:14:56.725", + "SYSLOGTIMESTAMP", + "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), + 1526400896725L, + ecsCompatibility + ); + validateTimestampMatch( + "May 15 17:14:56", + "SYSLOGTIMESTAMP", + "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss", "MMM d HH:mm:ss"), + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "15/May/2018:17:14:56 +0100", - "HTTPDATE", - "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", - "dd/MMM/yyyy:HH:mm:ss XX", - 1526400896000L - ); + validateTimestampMatch( + "15/May/2018:17:14:56 +0100", + "HTTPDATE", + "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", + "dd/MMM/yyyy:HH:mm:ss XX", + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "May 15, 2018 5:14:56 PM", - "CATALINA_DATESTAMP", - "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", - "MMM dd, yyyy h:mm:ss a", - 1526400896000L - ); + validateTimestampMatch( + "May 15, 2018 5:14:56 PM", + "CATALINA_DATESTAMP", + "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", + "MMM dd, yyyy h:mm:ss a", + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "May 15 2018 17:14:56", - "CISCOTIMESTAMP", - "\\b[A-Z]\\S{2} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", - Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), - 1526400896000L - ); + validateTimestampMatch( + "May 15 2018 17:14:56", + "CISCOTIMESTAMP", + "\\b[A-Z]\\S{2} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch( - "05/15/2018 17:14:56,374", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "MM/dd/yyyy HH:mm:ss,SSS", - 1526400896374L - ); - validateTimestampMatch( - "05-15-2018-17:14:56.374", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "MM-dd-yyyy-HH:mm:ss.SSS", - 1526400896374L - ); - validateTimestampMatch( - "15/05/2018 17:14:56.374", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "dd/MM/yyyy HH:mm:ss.SSS", - 1526400896374L - ); - validateTimestampMatch( - "15-05-2018-17:14:56,374", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "dd-MM-yyyy-HH:mm:ss,SSS", - 1526400896374L - ); - validateTimestampMatch( - "15.05.2018 17:14:56.374", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "dd.MM.yyyy HH:mm:ss.SSS", - 1526400896374L - ); - validateTimestampMatch( - "05/15/2018 17:14:56", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "MM/dd/yyyy HH:mm:ss", - 1526400896000L - ); - validateTimestampMatch( - "05-15-2018-17:14:56", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "MM-dd-yyyy-HH:mm:ss", - 1526400896000L - ); - validateTimestampMatch( - "15/05/2018 17:14:56", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "dd/MM/yyyy 
HH:mm:ss", - 1526400896000L - ); - validateTimestampMatch( - "15-05-2018-17:14:56", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "dd-MM-yyyy-HH:mm:ss", - 1526400896000L - ); - validateTimestampMatch( - "15.05.2018 17:14:56", - "DATESTAMP", - "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", - "dd.MM.yyyy HH:mm:ss", - 1526400896000L - ); + validateTimestampMatch( + "05/15/2018 17:14:56,374", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "MM/dd/yyyy HH:mm:ss,SSS", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "05-15-2018-17:14:56.374", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "MM-dd-yyyy-HH:mm:ss.SSS", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "15/05/2018 17:14:56.374", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "dd/MM/yyyy HH:mm:ss.SSS", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "15-05-2018-17:14:56,374", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "dd-MM-yyyy-HH:mm:ss,SSS", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "15.05.2018 17:14:56.374", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "dd.MM.yyyy HH:mm:ss.SSS", + 1526400896374L, + ecsCompatibility + ); + validateTimestampMatch( + "05/15/2018 17:14:56", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "MM/dd/yyyy HH:mm:ss", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "05-15-2018-17:14:56", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "MM-dd-yyyy-HH:mm:ss", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "15/05/2018 17:14:56", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "dd/MM/yyyy HH:mm:ss", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "15-05-2018-17:14:56", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "dd-MM-yyyy-HH:mm:ss", + 1526400896000L, + ecsCompatibility + ); + validateTimestampMatch( + "15.05.2018 17:14:56", + "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}[- ]\\d{2}:\\d{2}:\\d{2}\\b", + "dd.MM.yyyy HH:mm:ss", + 1526400896000L, + ecsCompatibility + ); - validateTimestampMatch("05/15/2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", "MM/dd/yyyy", 1526338800000L); - validateTimestampMatch("05-15-2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", "MM-dd-yyyy", 1526338800000L); - validateTimestampMatch("15/05/2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", "dd/MM/yyyy", 1526338800000L); - validateTimestampMatch("15-05-2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", "dd-MM-yyyy", 1526338800000L); - validateTimestampMatch("15.05.2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", "dd.MM.yyyy", 1526338800000L); + validateTimestampMatch( + "05/15/2018", + "DATE", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", + "MM/dd/yyyy", + 1526338800000L, + ecsCompatibility + ); + validateTimestampMatch( + "05-15-2018", + "DATE", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", + "MM-dd-yyyy", + 1526338800000L, + ecsCompatibility + ); + validateTimestampMatch( + 
"15/05/2018", + "DATE", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", + "dd/MM/yyyy", + 1526338800000L, + ecsCompatibility + ); + validateTimestampMatch( + "15-05-2018", + "DATE", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", + "dd-MM-yyyy", + 1526338800000L, + ecsCompatibility + ); + validateTimestampMatch( + "15.05.2018", + "DATE", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-](?:\\d{2}){1,2}\\b", + "dd.MM.yyyy", + 1526338800000L, + ecsCompatibility + ); - // The Kibana export format doesn't correspond to a built-in Grok pattern, so it has to be custom - validateTimestampMatch( - "May 15, 2018 @ 17:14:56.374", - "CUSTOM_TIMESTAMP", - "\\b[A-Z]\\S{2} \\d{1,2}, \\d{4} @ \\d{2}:\\d{2}:\\d{2}\\.\\d{3}\\b", - "MMM d, yyyy @ HH:mm:ss.SSS", - 1526400896374L - ); + // The Kibana export format doesn't correspond to a built-in Grok pattern, so it has to be custom + validateTimestampMatch( + "May 15, 2018 @ 17:14:56.374", + "CUSTOM_TIMESTAMP", + "\\b[A-Z]\\S{2} \\d{1,2}, \\d{4} @ \\d{2}:\\d{2}:\\d{2}\\.\\d{3}\\b", + "MMM d, yyyy @ HH:mm:ss.SSS", + 1526400896374L, + ecsCompatibility + ); + }; + + ecsCompatibilityModes.forEach(testFindFormatGivenOnlyKnownTimestampFormatAndEcsCompatibility); } public void testFindFormatGivenOnlySystemDate() { - validateTimestampMatch("1000000000000", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 1000000000000L); - validateTimestampMatch("1526400896374", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 1526400896374L); - validateTimestampMatch("2999999999999", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 2999999999999L); + Consumer testFindFormatGivenOnlySystemDateAndEcsCompatibility = (ecsCompatibility) -> { + validateTimestampMatch("1000000000000", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 1000000000000L, ecsCompatibility); + validateTimestampMatch("1526400896374", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 1526400896374L, ecsCompatibility); + validateTimestampMatch("2999999999999", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 2999999999999L, ecsCompatibility); + + validateTimestampMatch("1000000000", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1000000000000L, ecsCompatibility); + validateTimestampMatch("1526400896.736", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896736L, ecsCompatibility); + validateTimestampMatch("1526400896", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896000L, ecsCompatibility); + validateTimestampMatch("2999999999.999", "NUMBER", "\\b\\d{10}\\b", "UNIX", 2999999999999L, ecsCompatibility); + + validateTimestampMatch( + "400000005afb078a164ac980", + "BASE16NUM", + "\\b[0-9A-Fa-f]{24}\\b", + "TAI64N", + 1526400896374L, + ecsCompatibility + ); + }; - validateTimestampMatch("1000000000", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1000000000000L); - validateTimestampMatch("1526400896.736", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896736L); - validateTimestampMatch("1526400896", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896000L); - validateTimestampMatch("2999999999.999", "NUMBER", "\\b\\d{10}\\b", "UNIX", 2999999999999L); + ecsCompatibilityModes.forEach(testFindFormatGivenOnlySystemDateAndEcsCompatibility); - validateTimestampMatch("400000005afb078a164ac980", "BASE16NUM", "\\b[0-9A-Fa-f]{24}\\b", "TAI64N", 1526400896374L); } public void testCustomOverrideMatchingBuiltInFormat() { @@ -1599,14 +1715,16 @@ private void validateTimestampMatch( String expectedGrokPatternName, String expectedSimpleRegex, String expectedJavaTimestampFormat, - long expectedEpochMs + long expectedEpochMs, + boolean ecsCompatibility ) { validateTimestampMatch( text, expectedGrokPatternName, expectedSimpleRegex, 
Collections.singletonList(expectedJavaTimestampFormat), - expectedEpochMs + expectedEpochMs, + ecsCompatibility ); } @@ -1615,16 +1733,23 @@ private void validateTimestampMatch( String expectedGrokPatternName, String expectedSimpleRegex, List expectedJavaTimestampFormats, - long expectedEpochMs + long expectedEpochMs, + boolean ecsCompatibility ) { Pattern expectedSimplePattern = Pattern.compile(expectedSimpleRegex); assertTrue(expectedSimplePattern.matcher(text).find()); validateJavaTimestampFormats(expectedJavaTimestampFormats, text, expectedEpochMs); - TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder( + explanation, + true, + true, + true, + NOOP_TIMEOUT_CHECKER, + ecsCompatibility + ); strictTimestampFormatFinder.addSample(text); - assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); assertEquals(expectedSimplePattern.pattern(), strictTimestampFormatFinder.getSimplePattern().pattern()); assertEquals(expectedJavaTimestampFormats, strictTimestampFormatFinder.getJavaTimestampFormats()); assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); @@ -1634,14 +1759,27 @@ private void validateTimestampMatch( false, false, false, - NOOP_TIMEOUT_CHECKER + NOOP_TIMEOUT_CHECKER, + ecsCompatibility ); lenientTimestampFormatFinder.addSample(text); lenientTimestampFormatFinder.selectBestMatch(); - assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); assertEquals(expectedSimplePattern.pattern(), lenientTimestampFormatFinder.getSimplePattern().pattern()); assertEquals(expectedJavaTimestampFormats, lenientTimestampFormatFinder.getJavaTimestampFormats()); assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + + if (ecsCompatibility) { + if ("TOMCAT_DATESTAMP".equals(expectedGrokPatternName)) { + assertEquals("TOMCATLEGACY_DATESTAMP", strictTimestampFormatFinder.getGrokPatternName()); + assertEquals("TOMCATLEGACY_DATESTAMP", lenientTimestampFormatFinder.getGrokPatternName()); + } else if ("CATALINA_DATESTAMP".equals(expectedGrokPatternName)) { + assertEquals("CATALINA7_DATESTAMP", strictTimestampFormatFinder.getGrokPatternName()); + assertEquals("CATALINA7_DATESTAMP", lenientTimestampFormatFinder.getGrokPatternName()); + } + } else { + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + } } private void validateFindInFullMessage( From 3c2fc5aab8e987584ceb39df4c6508b5d9bd2f6e Mon Sep 17 00:00:00 2001 From: William Brafford Date: Thu, 18 Aug 2022 11:42:09 -0400 Subject: [PATCH 261/265] Mute failing tests (#89465) Mute for https://github.com/elastic/elasticsearch/issues/89464 --- .../reservedstate/service/FileSettingsServiceIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index 4c10544e2a555..495d1f276ce77 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -158,6 +158,7 @@ public void testSettingsApplied() throws Exception { 
         assertClusterStateSaveOK(savedClusterState);
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/89464")
     public void testSettingsAppliedOnStart() throws Exception {
         internalCluster().setBootstrapMasterNodeIndex(0);
         logger.info("--> start data node / non master node");

From 20ed7e3fd9b492949c86b68ec637028c9f491da6 Mon Sep 17 00:00:00 2001
From: Jake Landis
Date: Thu, 18 Aug 2022 12:54:03 -0500
Subject: [PATCH 262/265] Better support for multi cluster for run task (#89442)

This commit introduces a `./gradlew run-ccs` task with the following goals:
* mirror the ease of use of `./gradlew run` for manual cross cluster search development
* same credentials
* same well known ports
* uses ccs specific naming
* enable debugging across both clusters

This is admittedly kinda hacky. Test clusters have support for multi-cluster
setups and are in use for automated testing. There are some nuances that make
that setup (and this setup) a bit cumbersome, specifically needing to read one
cluster's config to configure another cluster. The run task adds a bit more
config (well-defined ports, etc.) than the tests need, so that also complicates
this a bit more. I found that without the additions here I was unable to get
both sharing of cluster configuration (like in the
[tests](https://github.com/elastic/elasticsearch/blob/main/qa/ccs-common-rest/build.gradle#L55))
and the run task's hard-coded config to work well together. Hopefully the
additions to the run task are not too hacky, as I could not find any other way.
---
 .../main/groovy/elasticsearch.run-ccs.gradle  | 60 +++++++++++++++++++
 .../gradle/testclusters/RunTask.java          | 39 +++++++++++-
 .../StandaloneRestIntegTestTask.java          |  2 +-
 .../testclusters/TestClustersAware.java       |  4 +-
 build.gradle                                  |  1 +
 5 files changed, 100 insertions(+), 6 deletions(-)
 create mode 100644 build-tools-internal/src/main/groovy/elasticsearch.run-ccs.gradle

diff --git a/build-tools-internal/src/main/groovy/elasticsearch.run-ccs.gradle b/build-tools-internal/src/main/groovy/elasticsearch.run-ccs.gradle
new file mode 100644
index 0000000000000..a137758e17f7b
--- /dev/null
+++ b/build-tools-internal/src/main/groovy/elasticsearch.run-ccs.gradle
@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +import org.elasticsearch.gradle.testclusters.DefaultTestClustersTask +import org.elasticsearch.gradle.testclusters.RunTask + +boolean proxyMode = true; + +def fulfillingCluster = testClusters.register('fulfilling-cluster') { + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + + user username: 'elastic-admin', password: 'elastic-password', role: '_es_test_root' +} + +def queryingCluster = testClusters.register('querying-cluster') { + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + if (proxyMode) { + setting 'cluster.remote.my_remote_cluster.mode', 'proxy' + setting 'cluster.remote.my_remote_cluster.proxy_address', { + "\"${fulfillingCluster.get().getAllTransportPortURI().get(0)}\"" + } + } else { + setting 'cluster.remote.my_remote_cluster.seeds', { + fulfillingCluster.get().getAllTransportPortURI().collect { "\"$it\"" }.toString() + } + } + setting 'cluster.remote.connections_per_cluster', "1" + + user username: 'elastic-admin', password: 'elastic-password', role: '_es_test_root' +} + +// the following task is needed to make sure the fulfilling cluster is fully configured before starting both clusters +// this allows the quering cluster to use configuration from the fulfilling cluster while honoring the RunTasks configuration (such as use port 9200) +tasks.register('initfulfillingCluster', RunTask) { + useCluster testClusters.named("fulfilling-cluster") + initOnly = true //only initialize the testCluster, don't start it + portOffset = 1 //when only initializing, instruct to use one above the normal ports to avoid collisions when other cluster also initializes + //debug = true //this task doesn't honor the command line options for run-ccs, so need to statically configure debug +} + +tasks.register("run-ccs", RunTask) { + dependsOn initfulfillingCluster + useCluster testClusters.named("fulfilling-cluster") + useCluster testClusters.named("querying-cluster") + doFirst { + println "** Querying cluster HTTP endpoints are: ${-> queryingCluster.get().allHttpSocketURI.join(",")}" + println "** Querying cluster transport endpoints are: ${-> queryingCluster.get().getAllTransportPortURI().join(",")}" + println "** Fulfilling cluster HTTP endpoints are: ${-> fulfillingCluster.get().allHttpSocketURI.join(",")}" + println "** Fulfilling cluster transport endpoints are: ${-> fulfillingCluster.get().getAllTransportPortURI().join(",")}" + } +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index 37fb9305b1bc6..f77d41f6cfd39 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -35,12 +35,16 @@ public class RunTask extends DefaultTestClustersTask { private Boolean debug = false; + private Boolean initOnly = false; + private Boolean preserveData = false; private Path dataDir = null; private String keystorePassword = ""; + private Integer offset = 0; + @Option(option = "debug-jvm", description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch.") public void setDebug(boolean enabled) { this.debug = enabled; @@ -86,10 +90,36 @@ public String getDataDir() { return dataDir.toString(); } + @Input + @Optional + Boolean getInitOnly() { + return initOnly; + } + + /** 
+ * Only initialize, but don't actually run. This is useful for multi-cluster run tasks. + */ + public void setInitOnly(Boolean initOnly) { + this.initOnly = initOnly; + } + + @Input + @Optional + public Integer getPortOffset() { + return offset; + } + + /** + * Manually increase the port offset. This is useful for multi-cluster run tasks. + */ + public void setPortOffset(Integer offset) { + this.offset = offset; + } + @Override public void beforeStart() { - int httpPort = 9200; - int transportPort = 9300; + int httpPort = 9200 + offset; + int transportPort = 9300 + offset; Map additionalSettings = System.getProperties() .entrySet() .stream() @@ -126,12 +156,15 @@ public void beforeStart() { } if (debug) { - enableDebug(); + enableDebug(getPortOffset()); } } @TaskAction public void runAndWait() throws IOException { + if (initOnly) { + return; + } List toRead = new ArrayList<>(); List aliveChecks = new ArrayList<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index 11ad0a29f5b8d..c28309a218b08 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -104,7 +104,7 @@ public WorkResult delete(Object... objects) { @Override public void beforeStart() { if (debugServer) { - enableDebug(); + enableDebug(0); } } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 18f88b0dc4afc..550dcd6df8802 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -37,8 +37,8 @@ default void useCluster(Provider cluster) { default void beforeStart() {} - default void enableDebug() { - int debugPort = 5007; + default void enableDebug(int portOffset) { + int debugPort = 5007 + portOffset; for (ElasticsearchCluster cluster : getClusters()) { for (ElasticsearchNode node : cluster.getNodes()) { getLogger().lifecycle("Running elasticsearch in debug mode, {} expecting running debug server on port {}", node, debugPort); diff --git a/build.gradle b/build.gradle index 4d84dcbdb7ffd..e1e11e60e110e 100644 --- a/build.gradle +++ b/build.gradle @@ -42,6 +42,7 @@ plugins { id 'elasticsearch.fips' id 'elasticsearch.internal-testclusters' id 'elasticsearch.run' + id 'elasticsearch.run-ccs' id 'elasticsearch.release-tools' id 'elasticsearch.versions' } From f0df4b769a1c3f2fab7b78edf75e698dc96941f6 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Thu, 18 Aug 2022 19:44:35 +0100 Subject: [PATCH 263/265] Updates to changelog processing after docs redesign (#89463) --- .../release/ValidateChangelogEntryTask.java | 76 ++++++-- .../templates/release-highlights.asciidoc | 8 +- .../ReleaseHighlightsGeneratorTest.java | 8 +- .../ValidateChangelogEntryTaskTest.java | 179 ++++++++++++++++++ ...hlightsGeneratorTest.generateFile.asciidoc | 24 ++- docs/changelog/83345.yaml | 5 +- 6 files changed, 265 insertions(+), 35 deletions(-) create mode 100644 build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTaskTest.java diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java index 14114314ad4de..acbd79fe28194 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java @@ -8,6 +8,8 @@ package org.elasticsearch.gradle.internal.release; +import com.google.common.annotations.VisibleForTesting; + import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.file.ConfigurableFileCollection; @@ -30,6 +32,21 @@ public class ValidateChangelogEntryTask extends DefaultTask { private final ConfigurableFileCollection changelogs; private final ProjectLayout projectLayout; + public static final String TRIPLE_BACKTICK = "```"; + private static final String CODE_BLOCK_ERROR = """ + [%s] uses a triple-backtick in the [%s] section, but it must be + formatted as a Asciidoc code block. For example: + + [source,yaml] + ---- + { + "metrics.time" : 10, + "metrics.time.min" : 1, + "metrics.time.max" : 500 + } + ---- + """; + @Inject public ValidateChangelogEntryTask(ObjectFactory objectFactory, ProjectLayout projectLayout) { this.changelogs = objectFactory.fileCollection(); @@ -43,37 +60,60 @@ public void executeTask() { .stream() .collect(Collectors.toMap(file -> rootDir.relativize(file.toURI()).toString(), ChangelogEntry::parse)); + changelogs.forEach(ValidateChangelogEntryTask::validate); + } + + @VisibleForTesting + static void validate(String path, ChangelogEntry entry) { // We don't try to find all such errors, because we expect them to be rare e.g. only // when a new file is added. 
- changelogs.forEach((path, entry) -> { - final String type = entry.getType(); - - if (type.equals("known-issue") == false && type.equals("security") == false) { - if (entry.getPr() == null) { - throw new GradleException( - "[" + path + "] must provide a [pr] number (only 'known-issue' and " + "'security' entries can omit this" - ); - } - - if (entry.getArea() == null) { - throw new GradleException( - "[" + path + "] must provide an [area] (only 'known-issue' and " + "'security' entries can omit this" - ); - } + final String type = entry.getType(); + + if (type.equals("known-issue") == false && type.equals("security") == false) { + if (entry.getPr() == null) { + throw new GradleException( + "[" + path + "] must provide a [pr] number (only 'known-issue' and 'security' entries can omit this" + ); } - if ((type.equals("breaking") || type.equals("breaking-java")) && entry.getBreaking() == null) { + if (entry.getArea() == null) { + throw new GradleException("[" + path + "] must provide an [area] (only 'known-issue' and 'security' entries can omit this"); + } + } + + if (type.equals("breaking") || type.equals("breaking-java")) { + if (entry.getBreaking() == null) { throw new GradleException( "[" + path + "] has type [" + type + "] and must supply a [breaking] section with further information" ); } - if (type.equals("deprecation") && entry.getDeprecation() == null) { + if (entry.getBreaking().getDetails().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "breaking.details")); + } + if (entry.getBreaking().getImpact().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "breaking.impact")); + } + } + + if (type.equals("deprecation")) { + if (entry.getDeprecation() == null) { throw new GradleException( "[" + path + "] has type [deprecation] and must supply a [deprecation] section with further information" ); } - }); + + if (entry.getDeprecation().getDetails().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "deprecation.details")); + } + if (entry.getDeprecation().getImpact().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "deprecation.impact")); + } + } + + if (entry.getHighlight() != null && entry.getHighlight().getBody().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "highlight.body")); + } } @InputFiles diff --git a/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc index f07ba9c5d4db3..bd8ef8602530b 100644 --- a/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc +++ b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc @@ -32,14 +32,18 @@ if (notableHighlights.isEmpty()) { %> <% for (highlight in notableHighlights) { %> [discrete] [[${ highlight.anchor }]] -=== {es-pull}${highlight.pr}[${highlight.title}] +=== ${highlight.title} ${highlight.body.trim()} + +{es-pull}${highlight.pr}[#${highlight.pr}] <% } %> // end::notable-highlights[] <% } %> <% for (highlight in nonNotableHighlights) { %> [discrete] [[${ highlight.anchor }]] -=== {es-pull}${highlight.pr}[${highlight.title}] +=== ${highlight.title} ${highlight.body.trim()} + +{es-pull}${highlight.pr}[#${highlight.pr}] <% } %> diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java 
b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java index 7f510bef22661..db39c6eea7e86 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java @@ -60,11 +60,11 @@ public void generateFile_rendersCorrectMarkup() throws Exception { } private List getEntries() { - ChangelogEntry entry1 = makeChangelogEntry(1, true); - ChangelogEntry entry2 = makeChangelogEntry(2, true); - ChangelogEntry entry3 = makeChangelogEntry(3, false); + ChangelogEntry entry123 = makeChangelogEntry(123, true); + ChangelogEntry entry456 = makeChangelogEntry(456, true); + ChangelogEntry entry789 = makeChangelogEntry(789, false); // Return unordered list, to test correct re-ordering - return List.of(entry2, entry1, entry3); + return List.of(entry456, entry123, entry789); } private ChangelogEntry makeChangelogEntry(int pr, boolean notable) { diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTaskTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTaskTest.java new file mode 100644 index 0000000000000..ec7b47b057a97 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTaskTest.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.gradle.api.GradleException; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.Test; + +import java.util.stream.Stream; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; + +class ValidateChangelogEntryTaskTest { + + @Test + void test_prNumber_isRequired() { + ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType("enhancement"); + + final String message = doValidate(changelog); + + assertThat(message, endsWith("must provide a [pr] number (only 'known-issue' and 'security' entries can omit this")); + } + + @Test + void test_prNumber_notRequired() { + Stream.of("known-issue", "security").forEach(type -> { + ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType(type); + + // Should not throw an exception! 
+ ValidateChangelogEntryTask.validate("", changelog); + }); + } + + @Test + void test_area_isRequired() { + final ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType("enhancement"); + changelog.setPr(123); + + final String message = doValidate(changelog); + + assertThat(message, endsWith("must provide an [area] (only 'known-issue' and 'security' entries can omit this")); + } + + @Test + void test_breaking_requiresBreakingSection() { + Stream.of("breaking", "breaking-java").forEach(type -> { + final ChangelogEntry changelog = buildChangelog(type); + + final String message = doValidate(changelog); + + assertThat(message, endsWith("has type [" + type + "] and must supply a [breaking] section with further information")); + }); + } + + @Test + void test_breaking_rejectsTripleBackticksInDetails() { + Stream.of("breaking", "breaking-java").forEach(type -> { + final ChangelogEntry.Breaking breaking = new ChangelogEntry.Breaking(); + breaking.setDetails(""" + Some waffle. + ``` + I AM CODE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog(type); + changelog.setBreaking(breaking); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [breaking.details] section")); + }); + } + + @Test + void test_breaking_rejectsTripleBackticksInImpact() { + Stream.of("breaking", "breaking-java").forEach(type -> { + final ChangelogEntry.Breaking breaking = new ChangelogEntry.Breaking(); + breaking.setDetails("Waffle waffle"); + breaking.setImpact(""" + More waffle. + ``` + THERE ARE WEASEL RAKING THROUGH MY GARBAGE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog(type); + changelog.setBreaking(breaking); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [breaking.impact] section")); + }); + } + + @Test + void test_deprecation_rejectsTripleBackticksInImpact() { + final ChangelogEntry.Deprecation deprecation = new ChangelogEntry.Deprecation(); + deprecation.setDetails("Waffle waffle"); + deprecation.setImpact(""" + More waffle. + ``` + THERE ARE WEASEL RAKING THROUGH MY GARBAGE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog("deprecation"); + changelog.setDeprecation(deprecation); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [deprecation.impact] section")); + } + + @Test + void test_deprecation_rejectsTripleBackticksInDetails() { + final ChangelogEntry.Deprecation deprecation = new ChangelogEntry.Deprecation(); + deprecation.setDetails(""" + Some waffle. + ``` + I AM CODE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog("deprecation"); + changelog.setDeprecation(deprecation); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [deprecation.details] section")); + } + + @Test + void test_highlight_rejectsTripleBackticksInBody() { + final ChangelogEntry.Highlight highlight = new ChangelogEntry.Highlight(); + highlight.setBody(""" + Some waffle. + ``` + I AM CODE! 
+ ``` + """); + + final ChangelogEntry changelog = buildChangelog("enhancement"); + changelog.setHighlight(highlight); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [highlight.body] section")); + } + + private static ChangelogEntry buildChangelog(String type) { + final ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType(type); + changelog.setPr(123); + changelog.setArea("Infra/Core"); + return changelog; + } + + private String doValidate(ChangelogEntry entry) { + try { + ValidateChangelogEntryTask.validate("docs/123.yaml", entry); + throw new AssertionError("No exception thrown!"); + } catch (Exception e) { + assertThat(e, Matchers.instanceOf(GradleException.class)); + return e.getMessage(); + } + } +} diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc index a55a590a8bca5..19c713042a42b 100644 --- a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc @@ -20,20 +20,26 @@ Other versions: // tag::notable-highlights[] [discrete] -[[notable_release_highlight_number_1]] -=== {es-pull}1[Notable release highlight number 1] -Notable release body number 1 +[[notable_release_highlight_number_123]] +=== Notable release highlight number 123 +Notable release body number 123 + +{es-pull}123[#123] [discrete] -[[notable_release_highlight_number_2]] -=== {es-pull}2[Notable release highlight number 2] -Notable release body number 2 +[[notable_release_highlight_number_456]] +=== Notable release highlight number 456 +Notable release body number 456 + +{es-pull}456[#456] // end::notable-highlights[] [discrete] -[[notable_release_highlight_number_3]] -=== {es-pull}3[Notable release highlight number 3] -Notable release body number 3 +[[notable_release_highlight_number_789]] +=== Notable release highlight number 789 +Notable release body number 789 + +{es-pull}789[#789] diff --git a/docs/changelog/83345.yaml b/docs/changelog/83345.yaml index 25e49cd882719..955e31a3c1929 100644 --- a/docs/changelog/83345.yaml +++ b/docs/changelog/83345.yaml @@ -14,7 +14,8 @@ highlight: As an example, the following ILM policy would roll an index over if it is at least 7 days old or at least 100 gigabytes, but only as long as the index is not empty. 
- ``` + [source,console] + ---- PUT _ilm/policy/my_policy { "policy": { @@ -31,5 +32,5 @@ highlight: } } } - ``` + ---- notable: true From a1015ce8b1e53653bc6282849800cd8db2d42fa5 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 18 Aug 2022 13:20:19 -0700 Subject: [PATCH 264/265] Add periodic job for single processor node testing --- ...arch+periodic+single-processor-node-tests.yml | 16 ++++++++++++++++ ...h+periodic+single-processor-tests-trigger.yml | 6 ++++++ 2 files changed, 22 insertions(+) create mode 100644 .ci/jobs.t/elastic+elasticsearch+periodic+single-processor-node-tests.yml create mode 100644 .ci/jobs.t/elastic+elasticsearch+periodic+single-processor-tests-trigger.yml diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+single-processor-node-tests.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+single-processor-node-tests.yml new file mode 100644 index 0000000000000..66b12f380c701 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+single-processor-node-tests.yml @@ -0,0 +1,16 @@ +--- +- job: + name: elastic+elasticsearch+%BRANCH%+periodic+single-processor-node-tests + display-name: "elastic / elasticsearch # %BRANCH% - single processor node tests" + description: "Testing with node.processors set to '1' for the Elasticsearch %BRANCH% branch.\n" + node: "general-purpose && docker" + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA11_HOME=$HOME/.java/java11 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.configure_test_clusters_with_one_processor=true check diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+single-processor-tests-trigger.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+single-processor-tests-trigger.yml new file mode 100644 index 0000000000000..40ad9e9dd5446 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+single-processor-tests-trigger.yml @@ -0,0 +1,6 @@ +--- +jjbb-template: periodic-trigger-lgc.yml +vars: + - periodic-job: elastic+elasticsearch+%BRANCH%+periodic+single-processor-node-tests + - lgc-job: elastic+elasticsearch+%BRANCH%+intake + - cron: "H H/12 * * *" From e949dff8d690abedcad11b13c2870b5fda1692b7 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 18 Aug 2022 15:00:57 -0700 Subject: [PATCH 265/265] Disable openid connect tests due to missing fixture (#89478) Relates to https://github.com/elastic/elasticsearch/issues/89477. For now just disable these tests since they are guaranteed to fail. 
--- x-pack/qa/oidc-op-tests/build.gradle | 3 +++ x-pack/test/idp-fixture/docker-compose.yml | 21 +++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle index 131739c4a1334..82ca259ee89ec 100644 --- a/x-pack/qa/oidc-op-tests/build.gradle +++ b/x-pack/qa/oidc-op-tests/build.gradle @@ -21,4 +21,7 @@ tasks.named("processJavaRestTestResources").configure { tasks.named("javaRestTest").configure { // OpenID Connect fixture does not support aarm64 onlyIf { Architecture.current() == Architecture.X64 } + + // AwaitsFix: https://github.com/elastic/elasticsearch/issues/89477 + enabled = false } diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml index feccc6fae0061..041707c87cf80 100644 --- a/x-pack/test/idp-fixture/docker-compose.yml +++ b/x-pack/test/idp-fixture/docker-compose.yml @@ -161,16 +161,17 @@ services: - ./idp/shibboleth-idp/metadata:/opt/shibboleth-idp/metadata - ./idp/shib-jetty-base/start.d/ssl.ini:/opt/shib-jetty-base/start.d/ssl.ini - oidc-provider: - image: "c2id/c2id-server:9.5" - depends_on: - - http-proxy - ports: - - "8080" - expose: - - "8080" - volumes: - - ./oidc/override.properties:/etc/c2id/override.properties +# c2id/c2id-server image is no longer available +# oidc-provider: +# image: "c2id/c2id-server:9.5" +# depends_on: +# - http-proxy +# ports: +# - "8080" +# expose: +# - "8080" +# volumes: +# - ./oidc/override.properties:/etc/c2id/override.properties http-proxy: image: "nginx:latest"
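
As a closing note on PATCH 262/265 above: the patch only shows the Gradle wiring, so here is a usage sketch based solely on what `elasticsearch.run-ccs.gradle` configures. The credentials (`elastic-admin` / `elastic-password`) and the remote cluster alias (`my_remote_cluster`) come from that file; the index name is a placeholder, and which of the two clusters ends up on port 9200 is reported by the task's `doFirst` output rather than guaranteed here.

```sh
# Start the fulfilling and querying clusters with the new run-ccs task.
./gradlew run-ccs

# Issue a cross-cluster search through the querying cluster, using the HTTP
# endpoint printed at startup (port 9200 is assumed for this example).
curl -u elastic-admin:elastic-password \
  "http://localhost:9200/my_remote_cluster:my-index/_search"
```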